// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
static bool xive_has_save_restore;

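/*
 * Retrieve the ESB characteristics of a HW interrupt from OPAL (flags,
 * EOI/trigger pages, shift, source chip) and map the EOI page, plus the
 * trigger page when it is distinct from the EOI page.
 */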
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

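/* Route a HW interrupt to a target/priority, retrying while OPAL is busy */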
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

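/* Read back the current target, priority and logical IRQ number from OPAL */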
static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}

#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

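/* Disable a queue in HW by clearing its configuration in OPAL */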
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

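/* Allocate and configure the per-CPU queue page for the given priority */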
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

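/* The native backend matches the OPAL XIVE "ibm,opal-xive-vc" node */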
static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

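/*
 * Allocate a HW interrupt on the given chip, retrying while OPAL is
 * busy. Returns 0 on failure.
 */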
u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(chip_id);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);

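/* Return a HW interrupt to OPAL, retrying while it reports busy */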
void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);

		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = XIVE_BAD_IRQ;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor-to-register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	xc->chip_id = cpu_to_chip_id(cpu);
}

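/*
 * Enable this CPU's pool VP and push its CAM value into the HV pool
 * context of the TIMA.
 */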
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

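/* Pull the pool VP context from this CPU and disable the VP in OPAL */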
static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

static const struct xive_ops xive_native_ops = {
	.populate_irq_data = xive_native_populate_irq_data,
	.configure_irq = xive_native_configure_irq,
	.get_irq_config = xive_native_get_irq_config,
	.setup_queue = xive_native_setup_queue,
	.cleanup_queue = xive_native_cleanup_queue,
	.match = xive_native_match,
	.shutdown = xive_native_shutdown,
	.update_pending = xive_native_update_pending,
	.prepare_cpu = xive_native_prepare_cpu,
	.setup_cpu = xive_native_setup_cpu,
	.teardown_cpu = xive_native_teardown_cpu,
	.sync_source = xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi = xive_native_get_ipi,
	.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name = "native",
};

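/*
 * Parse the optional provisioning properties from the device tree and
 * create the cache used to donate provisioning pages to OPAL.
 */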
static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

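/* Locate the OPAL XIVE node, map the TIMA and register the native backend */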
bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	if (of_get_property(np, "vp-save-restore", NULL))
		xive_has_save_restore = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

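/*
 * Called when OPAL requests provisioning during a VP block allocation:
 * donate one page per configured chip.
 */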
static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		kmemleak_ignore(p);
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

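/*
 * Allocate a power-of-two block of VPs large enough for max_vcpus,
 * provisioning extra pages to OPAL when it asks for them.
 */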
u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

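/* Enable a VP in OPAL, optionally with single escalation */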
int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc) {
		vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
		return -EIO;
	}
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

bool xive_native_has_save_restore(void)
{
	return xive_has_save_restore;
}
EXPORT_SYMBOL_GPL(xive_native_has_save_restore);

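/* Query a queue's page, size, EOI page, escalation IRQ and flags from OPAL */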
int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be64 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be32_to_cpu(qsize);
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
	       opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		vp_err(vp_id, "failed to get vp state : %lld\n", rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);