Lines matching refs:icp (KVM PPC XICS real-mode interrupt handlers)
33 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
39 struct kvmppc_ics *ics, struct kvmppc_icp *icp) in ics_rm_check_resend() argument
46 icp_rm_deliver_irq(xics, icp, state->number, true); in ics_rm_check_resend()
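
Lines 39-46 are the ICS (source controller) half of resend handling: each interrupt source carries a resend flag, and ics_rm_check_resend re-offers every flagged source to the presentation controller, passing check_resend=true to icp_rm_deliver_irq (line 46). Below is a minimal user-space model of that scan; the irq_state array and helper names are simplified stand-ins, not the kernel's structures.

    #include <stdbool.h>
    #include <stdio.h>

    struct irq_state { unsigned number; bool resend; };

    static void deliver_irq(unsigned irq, bool check_resend)
    {
        printf("re-offer irq %u (check_resend=%d)\n", irq, check_resend);
    }

    static void ics_check_resend(struct irq_state *state, int nr_irqs)
    {
        for (int i = 0; i < nr_irqs; i++) {
            if (!state[i].resend)
                continue;
            state[i].resend = false;             /* consume the flag */
            deliver_irq(state[i].number, true);  /* as at line 46 */
        }
    }

    int main(void)
    {
        struct irq_state state[] = { { 16, false }, { 17, true } };
        ics_check_resend(state, 2);
        return 0;
    }
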
130 struct kvmppc_icp *this_icp = this_vcpu->arch.icp; in icp_rm_set_vcpu_irq()
174 static inline bool icp_rm_try_update(struct kvmppc_icp *icp, in icp_rm_try_update() argument
185 success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; in icp_rm_try_update()
205 icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu); in icp_rm_try_update()
208 this_vcpu->arch.icp->rm_dbgstate = new; in icp_rm_try_update()
209 this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; in icp_rm_try_update()
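
icp_rm_try_update (lines 174-209) is the primitive everything else is built on: the whole per-cpu presentation-controller (ICP) state is packed into a single 64-bit word, so one cmpxchg64 (line 185) either installs the complete new state or fails and forces the caller to recompute; on success it also raises the target vcpu's interrupt line (line 205). A compilable C11 sketch of the same contract; the field layout here is illustrative, not the kernel's exact packing.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    union icp_state {
        uint64_t raw;                  /* compare-and-swapped as one word */
        struct {
            uint32_t xisr;             /* pending source, 0 = none */
            uint8_t  pending_pri;      /* its priority, 0xff = none */
            uint8_t  cppr;             /* current processor priority */
            uint8_t  mfrr;             /* IPI request priority */
            uint8_t  need_resend;      /* rejected sources to rescan */
        };
    };

    struct icp { _Atomic uint64_t state; };

    static bool icp_try_update(struct icp *icp, union icp_state old,
                               union icp_state new)
    {
        /* Succeeds only if no other cpu touched the state since the
         * caller's snapshot; the kernel version then also raises the
         * target vcpu's interrupt line (line 205) when needed. */
        return atomic_compare_exchange_strong(&icp->state, &old.raw, new.raw);
    }

    int main(void)
    {
        struct icp icp = { 0 };
        union icp_state old = { .raw = 0 }, new = { .raw = 0 };
        new.cppr = 0xff;               /* open the delivery window */
        return icp_try_update(&icp, old, new) ? 0 : 1;
    }

The point of this design is that real mode never takes a spinlock: contention only costs the caller an extra pass through its recompute loop.
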
216 struct kvmppc_icp *icp) in check_too_hard() argument
218 return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS; in check_too_hard()
222 struct kvmppc_icp *icp) in icp_rm_check_resend() argument
228 for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { in icp_rm_check_resend()
231 if (!test_and_clear_bit(icsid, icp->resend_map)) in icp_rm_check_resend()
235 ics_rm_check_resend(xics, ics, icp); in icp_rm_check_resend()
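
This is the ICP half of resend (lines 222-235): resend_map has one bit per ICS, and the test_and_clear_bit at line 231 guarantees each flagged ICS is rescanned exactly once even when another cpu sets bits concurrently. A standalone analogue with C11 atomics; MAX_ICSID and the helper names are invented for the sketch, and the kernel's for_each_set_bit skips clear bits instead of testing every index.

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_ICSID 7

    static _Atomic unsigned long resend_map;

    static void rescan_ics(int icsid) { printf("rescan ICS %d\n", icsid); }

    static void icp_check_resend(void)
    {
        for (int icsid = 0; icsid <= MAX_ICSID; icsid++) {
            unsigned long bit = 1UL << icsid;
            /* Atomic test-and-clear: only the caller that actually
             * cleared the bit performs the rescan. */
            if (!(atomic_fetch_and(&resend_map, ~bit) & bit))
                continue;
            rescan_ics(icsid);
        }
    }

    int main(void)
    {
        atomic_fetch_or(&resend_map, (1UL << 2) | (1UL << 5));
        icp_check_resend();
        return 0;
    }
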
239 static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority, in icp_rm_try_to_deliver() argument
246 old_state = new_state = READ_ONCE(icp->state); in icp_rm_try_to_deliver()
272 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_try_to_deliver()
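
icp_rm_try_to_deliver (lines 239-272) shows the retry pattern every handler below repeats: snapshot the state (READ_ONCE at line 246), compute the new state, and loop until icp_rm_try_update lands it (line 272). Under XICS rules a numerically lower priority is more favored, so the irq is installed only if it beats the CPPR, the MFRR and anything already pending, and a displaced pending irq is handed back through *reject. A simplified standalone model; the layout and setup values are illustrative.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    union icp_state {
        uint64_t raw;
        struct { uint32_t xisr; uint8_t pending_pri, cppr, mfrr; };
    };

    static _Atomic uint64_t state;

    static bool try_to_deliver(uint32_t irq, uint8_t priority, uint32_t *reject)
    {
        union icp_state old, new;
        bool success;

        do {
            old.raw = new.raw = atomic_load(&state);
            *reject = 0;
            /* Lower value = more favored: deliver only if the irq
             * beats the CPPR, the IPI request and the pending irq. */
            success = priority < new.cppr && priority < new.mfrr &&
                      priority < new.pending_pri;
            if (success) {
                *reject = new.xisr;        /* 0 if nothing was pending */
                new.xisr = irq;
                new.pending_pri = priority;
            }
        } while (!atomic_compare_exchange_strong(&state, &old.raw, new.raw));

        return success;
    }

    int main(void)
    {
        union icp_state s = { .xisr = 0, .pending_pri = 0xff,
                              .cppr = 0xff, .mfrr = 0xff };
        uint32_t reject;

        atomic_store(&state, s.raw);
        printf("delivered: %d\n", try_to_deliver(23, 5, &reject));
        return 0;
    }
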
277 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_deliver_irq() argument
314 if (!icp || state->server != icp->server_num) { in icp_rm_deliver_irq()
315 icp = kvmppc_xics_find_server(xics->kvm, state->server); in icp_rm_deliver_irq()
316 if (!icp) { in icp_rm_deliver_irq()
366 if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) { in icp_rm_deliver_irq()
372 icp->n_reject++; in icp_rm_deliver_irq()
389 set_bit(ics->icsid, icp->resend_map); in icp_rm_deliver_irq()
398 if (!icp->state.need_resend) { in icp_rm_deliver_irq()
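
icp_rm_deliver_irq (lines 277-398) glues the pieces together: a delivery that displaces a less-favored pending irq loops and re-offers the loser (lines 366-372, bumping n_reject), while a delivery the target cannot take parks the irq at its source, sets the ICS bit in resend_map (line 389) and makes sure need_resend is set (line 398). A toy scenario showing just that control flow, with stubs in place of the real state machine.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Scenario: irq 20 arrives, displaces pending irq 17; when 17 is
     * re-offered it no longer beats the CPPR and must be parked. */
    static uint32_t pending = 17;

    static bool try_to_deliver(uint32_t irq, uint32_t *reject)
    {
        *reject = 0;
        if (irq == 17)
            return false;          /* masked now: caller parks it */
        *reject = pending;         /* accepted, displacing old pending */
        pending = irq;
        return true;
    }

    static void deliver_irq(uint32_t new_irq)
    {
        uint32_t reject;

    again:
        if (try_to_deliver(new_irq, &reject)) {
            if (reject) {          /* re-offer the displaced irq */
                new_irq = reject;
                goto again;
            }
        } else {
            /* Reject path (lines 372-398): flag the irq's ICS in
             * resend_map and set need_resend so the next CPPR drop
             * rescans it. */
            printf("irq %u parked for resend\n", new_irq);
        }
    }

    int main(void)
    {
        deliver_irq(20);
        return 0;
    }
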
409 static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_down_cppr() argument
445 old_state = new_state = READ_ONCE(icp->state); in icp_rm_down_cppr()
469 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_down_cppr()
477 icp->n_check_resend++; in icp_rm_down_cppr()
478 icp_rm_check_resend(xics, icp); in icp_rm_down_cppr()
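
icp_rm_down_cppr (lines 409-478) handles the easy CPPR direction: writing a numerically larger (less favored) value only opens the delivery window, so the loop installs the new CPPR and atomically consumes need_resend, then rescans flagged sources outside the atomic section (lines 477-478). A sketch of that consume-then-rescan shape, omitting the kernel's IPI recheck.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    union icp_state {
        uint64_t raw;
        struct { uint8_t cppr; uint8_t need_resend; };
    };

    static _Atomic uint64_t state;

    static void down_cppr(uint8_t new_cppr)
    {
        union icp_state old, new;
        bool resend;

        do {
            old.raw = new.raw = atomic_load(&state);
            new.cppr = new_cppr;       /* open the delivery window */
            resend = new.need_resend;  /* consume the flag atomically */
            new.need_resend = 0;
        } while (!atomic_compare_exchange_strong(&state, &old.raw, new.raw));

        /* The rescan runs outside the atomic section, as at 477-478. */
        if (resend)
            printf("CPPR now %u: rescanning flagged sources\n", new_cppr);
    }

    int main(void)
    {
        union icp_state s = { .cppr = 0, .need_resend = 1 };
        atomic_store(&state, s.raw);
        down_cppr(0xff);
        return 0;
    }
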
487 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_xirr() local
494 icp_rm_clr_vcpu_irq(icp->vcpu); in xics_rm_h_xirr()
504 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_xirr()
513 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_xirr()
518 return check_too_hard(xics, icp); in xics_rm_h_xirr()
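
xics_rm_h_xirr (lines 487-518) implements the H_XIRR hcall: reading XIRR accepts the pending interrupt, so the returned word packs the old CPPR into bits 31-24 and the source number into the low 24 bits, and the CPPR is raised to the accepted interrupt's priority. A standalone model of that accept step; the layout is illustrative.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    union icp_state {
        uint64_t raw;
        struct { uint32_t xisr; uint8_t pending_pri, cppr; };
    };

    static _Atomic uint64_t state;

    static uint32_t h_xirr(void)
    {
        union icp_state old, new;
        uint32_t xirr;

        do {
            old.raw = new.raw = atomic_load(&state);
            /* XIRR: old CPPR in bits 31-24, accepted source below. */
            xirr = ((uint32_t)new.cppr << 24) | new.xisr;
            if (!new.xisr)
                break;                 /* nothing pending: no update */
            /* Accepting raises the CPPR to the irq's priority. */
            new.cppr = new.pending_pri;
            new.xisr = 0;
            new.pending_pri = 0xff;
        } while (!atomic_compare_exchange_strong(&state, &old.raw, new.raw));

        return xirr;
    }

    int main(void)
    {
        union icp_state s = { .xisr = 23, .pending_pri = 5, .cppr = 0xff };
        atomic_store(&state, s.raw);
        printf("xirr = 0x%08x\n", h_xirr());
        return 0;
    }
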
526 struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp; in xics_rm_h_ipi() local
536 icp = this_icp; in xics_rm_h_ipi()
538 icp = kvmppc_xics_find_server(vcpu->kvm, server); in xics_rm_h_ipi()
539 if (!icp) in xics_rm_h_ipi()
570 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_ipi()
591 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_ipi()
596 icp_rm_deliver_irq(xics, icp, reject, false); in xics_rm_h_ipi()
602 icp_rm_check_resend(xics, icp); in xics_rm_h_ipi()
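
xics_rm_h_ipi (lines 526-602) implements H_IPI: store the new MFRR, and if it is more favored than both the CPPR and the pending priority, make the fixed IPI source the pending interrupt, bouncing any displaced irq back through icp_rm_deliver_irq (line 596). A sketch of the Set_MFRR/Check_IPI step; XICS_IPI here is the conventional IPI source number.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define XICS_IPI 2    /* the fixed source number XICS uses for IPIs */

    union icp_state {
        uint64_t raw;
        struct { uint32_t xisr; uint8_t pending_pri, cppr, mfrr; };
    };

    static _Atomic uint64_t state;

    static void h_ipi(uint8_t mfrr)
    {
        union icp_state old, new;
        uint32_t reject;

        do {
            old.raw = new.raw = atomic_load(&state);
            new.mfrr = mfrr;           /* Set_MFRR */
            reject = 0;
            /* Check_IPI: present the IPI if it beats the CPPR and
             * anything already pending; the loser is re-offered. */
            if (mfrr < new.cppr && mfrr <= new.pending_pri) {
                reject = new.xisr;
                new.xisr = XICS_IPI;
                new.pending_pri = mfrr;
            }
        } while (!atomic_compare_exchange_strong(&state, &old.raw, new.raw));

        if (reject && reject != XICS_IPI)
            printf("re-deliver displaced irq %u\n", reject);
    }

    int main(void)
    {
        union icp_state s = { .xisr = 23, .pending_pri = 5,
                              .cppr = 0xff, .mfrr = 0xff };
        atomic_store(&state, s.raw);
        h_ipi(0);    /* most favored: the IPI displaces irq 23 */
        return 0;
    }
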
612 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_cppr() local
625 if (cppr > icp->state.cppr) { in xics_rm_h_cppr()
626 icp_rm_down_cppr(xics, icp, cppr); in xics_rm_h_cppr()
628 } else if (cppr == icp->state.cppr) in xics_rm_h_cppr()
642 icp_rm_clr_vcpu_irq(icp->vcpu); in xics_rm_h_cppr()
645 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_cppr()
656 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_cppr()
663 icp->n_reject++; in xics_rm_h_cppr()
664 icp_rm_deliver_irq(xics, icp, reject, false); in xics_rm_h_cppr()
667 return check_too_hard(xics, icp); in xics_rm_h_cppr()
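
xics_rm_h_cppr (lines 612-667) splits H_CPPR by direction: a numerically larger value is the easy case and goes through icp_rm_down_cppr (lines 625-626), while raising priority can mask the currently pending irq, which is then rejected back to its source (lines 663-664). A sketch of that masking direction.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    union icp_state {
        uint64_t raw;
        struct { uint32_t xisr; uint8_t pending_pri, cppr; };
    };

    static _Atomic uint64_t state;

    static void h_cppr_raise(uint8_t cppr)
    {
        union icp_state old, new;
        uint32_t reject;

        do {
            old.raw = new.raw = atomic_load(&state);
            new.cppr = cppr;
            reject = 0;
            /* A pending irq needs pending_pri < cppr to be
             * presentable; if the new CPPR masks it, bounce it back
             * to its source, as at lines 663-664. */
            if (cppr <= new.pending_pri) {
                reject = new.xisr;
                new.xisr = 0;
                new.pending_pri = 0xff;
            }
        } while (!atomic_compare_exchange_strong(&state, &old.raw, new.raw));

        if (reject)
            printf("irq %u rejected back to its source\n", reject);
    }

    int main(void)
    {
        union icp_state s = { .xisr = 23, .pending_pri = 5, .cppr = 0xff };
        atomic_store(&state, s.raw);
        h_cppr_raise(3);    /* 3 <= 5: irq 23 is now masked */
        return 0;
    }
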
673 struct kvmppc_icp *icp = vcpu->arch.icp; in ics_rm_eoi() local
705 icp->rm_action |= XICS_RM_NOTIFY_EOI; in ics_rm_eoi()
706 icp->rm_eoied_irq = irq; in ics_rm_eoi()
726 return check_too_hard(xics, icp); in ics_rm_eoi()
732 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_eoi() local
752 icp_rm_down_cppr(xics, icp, xirr >> 24); in xics_rm_h_eoi()
756 return check_too_hard(xics, icp); in xics_rm_h_eoi()
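
An EOI has two halves: xics_rm_h_eoi restores the CPPR from the top byte of the written XIRR via icp_rm_down_cppr (line 752), and ics_rm_eoi handles the source side. When the host must be told about the EOI, real mode cannot do that itself, so the work is queued in icp->rm_action (lines 705-706) and check_too_hard (line 218) turns it into H_TOO_HARD so virtual mode finishes the job. A sketch of that defer-to-virtual-mode pattern; the constants and the notify predicate are illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define H_SUCCESS       0
    #define H_TOO_HARD      9999      /* "finish this in virtual mode" */
    #define RM_NOTIFY_EOI   0x1       /* stands in for XICS_RM_NOTIFY_EOI */

    struct icp { unsigned rm_action; uint32_t rm_eoied_irq; };

    static void down_cppr(uint8_t cppr) { printf("restore CPPR to %u\n", cppr); }

    /* Hypothetical predicate: does the host care about this EOI? */
    static bool host_wants_eoi(uint32_t irq) { return irq == 23; }

    static long h_eoi(struct icp *icp, uint32_t xirr)
    {
        uint32_t irq = xirr & 0xffffff;

        /* Presentation side: the written XIRR's top byte is the CPPR
         * to restore (line 752); dropping it may trigger resends. */
        down_cppr(xirr >> 24);

        /* Source side: queue host notification for virtual mode and
         * let check_too_hard punt (lines 705-706, 726). */
        if (host_wants_eoi(irq)) {
            icp->rm_action |= RM_NOTIFY_EOI;
            icp->rm_eoied_irq = irq;
        }
        return icp->rm_action ? H_TOO_HARD : H_SUCCESS;
    }

    int main(void)
    {
        struct icp icp = { 0, 0 };
        printf("h_eoi -> %ld\n", h_eoi(&icp, (0xffu << 24) | 23));
        return 0;
    }
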
846 struct kvmppc_icp *icp; in kvmppc_deliver_irq_passthru() local
855 icp = vcpu->arch.icp; in kvmppc_deliver_irq_passthru()
873 icp_rm_deliver_irq(xics, icp, irq, false); in kvmppc_deliver_irq_passthru()
878 if (check_too_hard(xics, icp) == H_TOO_HARD) in kvmppc_deliver_irq_passthru()
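
kvmppc_deliver_irq_passthru (lines 846-878) is the payoff of all the real-mode machinery: a host hardware interrupt belonging to a passed-through device is translated to its guest irq and injected with icp_rm_deliver_irq without leaving real mode; only if the delivery queued virtual-mode work (the check_too_hard test at line 878) does the vcpu exit to the host. An illustrative model of that fast path, with an invented mapping table.

    #include <stdint.h>
    #include <stdio.h>

    struct irq_map { uint32_t host_irq, guest_irq; };

    /* Invented mapping: host hwirq 140 is a passed-through device
     * wired to guest irq 23. */
    static const struct irq_map pimap[] = { { 140, 23 } };

    static unsigned rm_action;    /* nonzero: must finish in virtual mode */

    static void deliver_irq(uint32_t irq)
    {
        printf("inject guest irq %u in real mode\n", irq);
        /* A real delivery may queue rm_action work (rejects, EOI
         * notifications) that only virtual mode can complete. */
    }

    static int deliver_irq_passthru(uint32_t host_irq)
    {
        for (unsigned i = 0; i < sizeof(pimap) / sizeof(pimap[0]); i++) {
            if (pimap[i].host_irq != host_irq)
                continue;
            deliver_irq(pimap[i].guest_irq);
            return rm_action ? 1 : 0;   /* 1: exit to the host */
        }
        return -1;                      /* not a passthrough irq */
    }

    int main(void)
    {
        printf("result = %d\n", deliver_irq_passthru(140));
        return 0;
    }
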