Lines matching references to xc (a struct kvmppc_xive_vcpu pointer) in the KVM PowerPC XIVE code. Each entry gives the source line number followed by the matching line; every match is reported inside a GLUE()-expanded function.

14 static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)  in GLUE()
40 xc->pending |= 1 << cppr; in GLUE()
44 if (cppr >= xc->hw_cppr) in GLUE()
46 smp_processor_id(), cppr, xc->hw_cppr); in GLUE()
54 xc->hw_cppr = cppr; in GLUE()
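
The fragments at source lines 14-54 are the acknowledge path: the priority returned by the ack is recorded in a per-priority pending bitmap and mirrored into the software copy of the hardware CPPR. Below is a minimal user-space sketch of just that bookkeeping, assuming nothing beyond what the fragments show; struct vcpu_state is a stand-in for the kernel's struct kvmppc_xive_vcpu and models only the fields named in the listing (pending, cppr, hw_cppr, mfrr).

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's struct kvmppc_xive_vcpu: only the fields
     * named in the listing are modelled. Lower numbers are more favoured
     * priorities; 0xff means "none". */
    struct vcpu_state {
            uint8_t pending;   /* bitmap, bit N set = priority N pending */
            uint8_t cppr;      /* current processor priority */
            uint8_t hw_cppr;   /* last CPPR value pushed to the hardware */
            uint8_t mfrr;      /* pending IPI priority, 0xff = no IPI */
    };

    static void ack_pending(struct vcpu_state *xc, uint8_t cppr)
    {
            /* Remember that an interrupt of this priority is now pending. */
            if (cppr < 8)
                    xc->pending |= 1 << cppr;

            /* An ack should report a priority more favoured than the CPPR we
             * last pushed to the hardware; the real code warns otherwise. */
            if (cppr >= xc->hw_cppr)
                    fprintf(stderr, "odd ack: cppr %u >= hw_cppr %u\n",
                            cppr, xc->hw_cppr);

            /* The ack also moved the hardware CPPR; keep our copy in sync. */
            xc->hw_cppr = cppr;
    }

    int main(void)
    {
            struct vcpu_state xc = { .cppr = 0xff, .hw_cppr = 0xff, .mfrr = 0xff };

            ack_pending(&xc, 5);   /* hardware acked a priority-5 interrupt */
            printf("pending=%02x hw_cppr=%u\n", xc.pending, xc.hw_cppr);
            return 0;
    }
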
110 static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc, in GLUE()
117 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { in GLUE()
129 if (prio >= xc->cppr || prio > 7) { in GLUE()
130 if (xc->mfrr < xc->cppr) { in GLUE()
131 prio = xc->mfrr; in GLUE()
138 q = &xc->queues[prio]; in GLUE()
173 GLUE(X_PFX,source_eoi)(xc->vp_ipi, in GLUE()
174 &xc->vp_ipi_data); in GLUE()
214 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { in GLUE()
215 prio = xc->mfrr; in GLUE()
232 xc->pending = pending; in GLUE()
252 xc->cppr = prio; in GLUE()
259 if (xc->cppr != xc->hw_cppr) { in GLUE()
260 xc->hw_cppr = xc->cppr; in GLUE()
261 __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); in GLUE()
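
Source lines 110-261 are the scan loop: it keeps going while there is a pending IPI (mfrr != 0xff) or a non-empty pending bitmap and nothing has been found yet, treats the IPI priority as one more pending source, refuses anything at or below the current CPPR, and, once a priority is accepted, lowers cppr to it and pushes the new value to the hardware OS CPPR register (TM_QW1_OS + TM_CPPR) if it changed. The following is a reduced model of that selection, reusing the vcpu_state stand-in above; per-priority queue handling, the IPI EOI (source_eoi on vp_ipi) and the MMIO write are deliberately left out.

    /* Illustrative model, not the kernel function: pick the most favoured
     * (numerically lowest) priority that is either pending or the target of
     * a pending IPI and that beats the current CPPR; -1 if nothing does. */
    static int scan_for_interrupt(struct vcpu_state *xc)
    {
            for (int prio = 0; prio < 8; prio++) {
                    int pend = xc->pending & (1 << prio);
                    int ipi  = (xc->mfrr == prio);

                    if (!pend && !ipi)
                            continue;
                    if (prio >= xc->cppr)
                            break;          /* nothing left can beat the CPPR */

                    /* Accepting the interrupt lowers the CPPR to its priority;
                     * the real code then syncs hw_cppr and, if it changed,
                     * writes it to the thread context. */
                    xc->cppr = prio;
                    if (xc->hw_cppr != xc->cppr)
                            xc->hw_cppr = xc->cppr;
                    return prio;
            }
            return -1;
    }
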
269 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in GLUE() local
275 xc->GLUE(X_STAT_PFX,h_xirr)++; in GLUE()
278 GLUE(X_PFX,ack_pending)(xc); in GLUE()
281 xc->pending, xc->hw_cppr, xc->cppr); in GLUE()
284 old_cppr = xive_prio_to_guest(xc->cppr); in GLUE()
287 hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch); in GLUE()
290 hirq, xc->hw_cppr, xc->cppr); in GLUE()
320 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in GLUE() local
321 u8 pending = xc->pending; in GLUE()
326 xc->GLUE(X_STAT_PFX,h_ipoll)++; in GLUE()
329 if (xc->server_num != server) { in GLUE()
333 xc = vcpu->arch.xive_vcpu; in GLUE()
345 hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); in GLUE()
348 vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); in GLUE()
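
The hcall handlers hand their result back as an XICS-style XIRR word in GPR4: the interrupt number in the low 24 bits and a CPPR in the top byte, per the "hirq | (xc->cppr << 24)" expression above. H_IPOLL scans in scan_poll mode (look, don't consume) on a local copy of the pending bitmap and uses the current CPPR; the H_XIRR fragments further up capture the guest-visible CPPR (xive_prio_to_guest) before the scan can lower it, matching the XICS convention of returning the pre-accept CPPR. The packing itself, as a trivial helper; pack_xirr() is a name made up for this sketch.

    /* XIRR layout implied by "hirq | (xc->cppr << 24)": CPPR in bits 31..24,
     * interrupt number in bits 23..0. Illustrative only. */
    static uint32_t pack_xirr(uint8_t cppr, uint32_t hirq)
    {
            return ((uint32_t)cppr << 24) | (hirq & 0x00ffffff);
    }
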
353 static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc) in GLUE()
357 pending = xc->pending; in GLUE()
358 if (xc->mfrr != 0xff) { in GLUE()
359 if (xc->mfrr < 8) in GLUE()
360 pending |= 1 << xc->mfrr; in GLUE()
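
push_pending_to_hw (source lines 353-360) folds a pending IPI into the same per-priority bitmap before handing it to the hardware: a valid mfrr (not 0xff and below 8) simply becomes one more set bit. A sketch of only the folding step, reusing the stand-in struct; the listing does not show where the resulting byte is written, so that part is omitted.

    /* Illustrative: an outstanding IPI behaves like one more pending priority. */
    static uint8_t fold_ipi_into_pending(const struct vcpu_state *xc)
    {
            uint8_t pending = xc->pending;

            if (xc->mfrr != 0xff && xc->mfrr < 8)
                    pending |= 1 << xc->mfrr;

            return pending;
    }
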
372 struct kvmppc_xive_vcpu *xc) in GLUE()
377 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { in GLUE()
378 struct xive_q *q = &xc->queues[prio]; in GLUE()
410 if (xc->server_num == state->act_server) in GLUE()
439 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in GLUE() local
445 xc->GLUE(X_STAT_PFX,h_cppr)++; in GLUE()
451 old_cppr = xc->cppr; in GLUE()
452 xc->cppr = cppr; in GLUE()
467 GLUE(X_PFX,push_pending_to_hw)(xc); in GLUE()
486 GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc); in GLUE()
490 xc->hw_cppr = cppr; in GLUE()
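
The H_CPPR fragments (source 439-490) record the new priority, then either replay the pending bitmap to the hardware or rescan the queues for interrupts that were re-routed to another server, and finally update hw_cppr. The listing shows only the calls, not the condition that selects between them; the direction used below (replay when the CPPR becomes less restrictive) is an assumption based on XICS semantics, noted as such in the comments.

    /* Illustrative shape of the CPPR update; the branch condition is assumed. */
    static void set_cppr(struct vcpu_state *xc, uint8_t cppr)
    {
            uint8_t old_cppr = xc->cppr;

            xc->cppr = cppr;

            if (cppr > old_cppr) {
                    /* Window opened: previously masked priorities may now be
                     * deliverable, so the real code replays the pending bitmap
                     * (push_pending_to_hw, cf. fold_ipi_into_pending() above). */
            } else {
                    /* Window narrowed: the now-masked queues are where interrupts
                     * whose routing changed would hide (scan_for_rerouted_irqs
                     * in the listing, which walks prio = cppr..KVMPPC_XIVE_Q_COUNT). */
            }

            xc->hw_cppr = cppr;     /* finally sync the software copy of the HW CPPR */
    }
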
501 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in GLUE() local
510 xc->GLUE(X_STAT_PFX,h_eoi)++; in GLUE()
512 xc->cppr = xive_prio_from_guest(new_cppr); in GLUE()
590 GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi); in GLUE()
591 GLUE(X_PFX,push_pending_to_hw)(xc); in GLUE()
592 pr_devel(" after scan pending=%02x\n", xc->pending); in GLUE()
595 xc->hw_cppr = xc->cppr; in GLUE()
596 __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); in GLUE()
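
H_EOI (source 501-596) receives an XIRR back from the guest: the top byte is the new CPPR to restore (the counterpart of pack_xirr() above) and the low bits name the interrupt being ended. The fragments then convert the guest priority (xive_prio_from_guest), run the scan in scan_eoi mode, replay the pending bitmap and write the resulting CPPR to the OS thread context (TM_QW1_OS + TM_CPPR). Only the unpacking is sketched here; unpack_xirr() is a made-up helper.

    /* Illustrative split of the H_EOI argument into its two fields. */
    static void unpack_xirr(uint32_t xirr, uint8_t *new_cppr, uint32_t *irq)
    {
            *new_cppr = xirr >> 24;          /* CPPR to restore after the EOI */
            *irq      = xirr & 0x00ffffff;   /* interrupt number being EOIed */
    }
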
604 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in GLUE() local
608 xc->GLUE(X_STAT_PFX,h_ipi)++; in GLUE()
614 xc = vcpu->arch.xive_vcpu; in GLUE()
617 xc->mfrr = mfrr; in GLUE()
632 if (mfrr < xc->cppr) in GLUE()
633 __x_writeq(0, __x_trig_page(&xc->vp_ipi_data)); in GLUE()
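
Finally, the H_IPI fragments (source 604-633) store the requested priority in the target vCPU's mfrr and only fire the backing IPI source (a write to its trigger page) when that priority would actually get through the target's current CPPR. A last sketch of that decision, with trigger_ipi() standing in for the __x_writeq() to the trigger page:

    static void send_ipi(struct vcpu_state *target, uint8_t mfrr,
                         void (*trigger_ipi)(struct vcpu_state *))
    {
            target->mfrr = mfrr;             /* 0xff means "no IPI pending" */

            if (mfrr < target->cppr)         /* more favoured than the CPPR? */
                    trigger_ipi(target);     /* poke the IPI source now */
            /* Otherwise the priority stays parked in mfrr and is picked up by
             * the scan once the CPPR rises (see scan_for_interrupt() above). */
    }
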