Lines Matching refs:arch (arch/powerpc/kvm/book3s_hv.c)
132 return kvm->arch.nested_enable && kvm_is_radix(kvm); in nesting_enabled()
236 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
302 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
314 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
315 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
316 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
317 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
318 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
320 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
325 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
331 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
332 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
333 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
334 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
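The kvmppc_core_vcpu_load_hv()/kvmppc_core_vcpu_put_hv() lines above form a matched pair: put stamps vcpu->arch.busy_preempt with the current timebase while the vcpu is busy in the host, and load folds the elapsed interval into vcpu->arch.busy_stolen, both under tbacct_lock. A minimal user-space sketch of that pattern follows, with a pthread mutex and a monotonic clock standing in for the kernel spinlock and the PPC timebase; everything other than the TB_NIL/busy_preempt/busy_stolen names is hypothetical.

#include <pthread.h>
#include <stdint.h>
#include <time.h>

#define TB_NIL (~(uint64_t)0)

enum vcpu_state { VCPU_RUNNABLE, VCPU_BUSY_IN_HOST };

struct vcpu_acct {
	pthread_mutex_t tbacct_lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	enum vcpu_state state;
	uint64_t busy_preempt;		/* timebase at preemption, or TB_NIL */
	uint64_t busy_stolen;		/* accumulated stolen time */
};

static uint64_t mftb(void)		/* stand-in for the PPC timebase read */
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* put: remember when host-side busy time started */
static void vcpu_put_acct(struct vcpu_acct *v)
{
	pthread_mutex_lock(&v->tbacct_lock);
	if (v->state == VCPU_BUSY_IN_HOST)
		v->busy_preempt = mftb();
	pthread_mutex_unlock(&v->tbacct_lock);
}

/* load: fold the elapsed interval into busy_stolen and disarm */
static void vcpu_load_acct(struct vcpu_acct *v)
{
	pthread_mutex_lock(&v->tbacct_lock);
	if (v->state == VCPU_BUSY_IN_HOST && v->busy_preempt != TB_NIL) {
		v->busy_stolen += mftb() - v->busy_preempt;
		v->busy_preempt = TB_NIL;
	}
	pthread_mutex_unlock(&v->tbacct_lock);
}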
339 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
348 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
409 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
415 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
417 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
419 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
421 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
423 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
424 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
426 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
427 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
428 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
430 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
432 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
433 vcpu->arch.last_inst); in kvmppc_dump_regs()
453 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
459 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
522 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
535 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
546 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
549 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
556 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
559 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
566 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
567 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
570 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
575 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
580 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
591 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
613 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
618 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
648 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
649 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
650 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
653 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
654 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
655 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
656 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
657 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
659 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
660 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
661 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
662 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
664 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
665 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
666 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
697 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
698 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
701 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
702 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
703 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
704 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
705 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
706 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
711 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
715 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
717 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
718 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
719 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
722 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
723 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
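kvmppc_create_dtl_entry() writes into a guest-pinned circular buffer: the cursor vcpu->arch.dtl_ptr advances one entry at a time, wraps from pinned_end back to pinned_addr, and a monotonically increasing dtl_index is published so the guest can tell how far the log has advanced. A reduced sketch of that ring discipline, assuming a two-field entry and omitting the cpu_to_be* byte swaps:

#include <stdint.h>

struct dtl_entry {
	uint16_t processor_id;
	uint64_t enqueue_to_dispatch_time;	/* the "stolen" value above */
};

struct dtl_ring {
	struct dtl_entry *base;	/* pinned_addr in the listing */
	struct dtl_entry *end;	/* pinned_end (one past the last entry) */
	struct dtl_entry *cur;	/* dtl_ptr */
	uint64_t index;		/* dtl_index, published to the VPA */
};

static void dtl_log(struct dtl_ring *r, uint16_t pcpu, uint64_t stolen)
{
	struct dtl_entry *dt = r->cur;

	dt->processor_id = pcpu;
	dt->enqueue_to_dispatch_time = stolen;
	if (++dt == r->end)	/* wrap the circular buffer */
		dt = r->base;
	r->cur = dt;
	r->index++;		/* the consumer compares against this */
}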
732 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
740 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
747 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
749 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
770 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
781 vcpu->arch.dawr0 = value1; in kvmppc_h_set_mode()
782 vcpu->arch.dawrx0 = value2; in kvmppc_h_set_mode()
791 if (!vcpu->kvm->arch.dawr1_enabled) in kvmppc_h_set_mode()
797 vcpu->arch.dawr1 = value1; in kvmppc_h_set_mode()
798 vcpu->arch.dawrx1 = value2; in kvmppc_h_set_mode()
890 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
905 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
919 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
920 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
923 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
948 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, in kvmppc_nested_h_rpt_invalidate()
984 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, in kvmppc_h_rpt_invalidate()
999 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
1058 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
1060 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
1083 if (list_empty(&kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
1169 if (!arch_get_random_seed_long(&vcpu->arch.regs.gpr[4])) in kvmppc_pseries_do_hcall()
1193 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1197 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1258 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1271 vcpu->arch.shregs.msr |= MSR_EE; in kvmppc_cede()
1272 vcpu->arch.ceded = 1; in kvmppc_cede()
1274 if (vcpu->arch.prodded) { in kvmppc_cede()
1275 vcpu->arch.prodded = 0; in kvmppc_cede()
1277 vcpu->arch.ceded = 0; in kvmppc_cede()
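kvmppc_cede() shows the H_CEDE/H_PROD handshake: the vcpu marks itself ceded, then re-checks prodded so that a prod racing in is consumed and the cede cancelled rather than lost (the real code orders the two accesses with barriers, elided here). A toy model, with the structure name hypothetical:

struct vcpu_idle {
	int ceded;	/* vcpu has voluntarily yielded */
	int prodded;	/* another vcpu poked us via H_PROD */
};

static void cede(struct vcpu_idle *v)
{
	v->ceded = 1;
	/* a memory barrier sits here in the kernel version */
	if (v->prodded) {
		v->prodded = 0;	/* consume the prod */
		v->ceded = 0;	/* and cancel the cede */
	}
}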
1323 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1341 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1380 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1387 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1392 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1393 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1401 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1402 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
1440 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_exit_hv()
1443 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1444 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1447 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1452 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1478 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_exit_hv()
1486 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1487 ulong flags = vcpu->arch.shregs.msr & 0x083c0000; in kvmppc_handle_exit_hv()
1495 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1499 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1516 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
1525 if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_handle_exit_hv()
1560 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1578 if (vcpu->arch.fault_dsisr == HDSISR_CANARY) { in kvmppc_handle_exit_hv()
1594 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { in kvmppc_handle_exit_hv()
1596 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1601 if (!(vcpu->arch.shregs.msr & MSR_DR)) in kvmppc_handle_exit_hv()
1602 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1604 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1606 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1607 vsid, vcpu->arch.fault_dsisr, true); in kvmppc_handle_exit_hv()
1614 vcpu->arch.fault_dar, err); in kvmppc_handle_exit_hv()
1623 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1624 vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & in kvmppc_handle_exit_hv()
1633 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_exit_hv()
1634 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1639 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { in kvmppc_handle_exit_hv()
1641 vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1646 if (!(vcpu->arch.shregs.msr & MSR_IR)) in kvmppc_handle_exit_hv()
1647 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1649 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1651 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1652 vsid, vcpu->arch.fault_dsisr, false); in kvmppc_handle_exit_hv()
1672 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1673 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1674 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1675 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1707 if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && in kvmppc_handle_exit_hv()
1722 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1723 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1724 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1734 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_handle_nested_exit()
1748 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_nested_exit()
1751 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1752 vcpu->arch.shregs.msr); in kvmppc_handle_nested_exit()
1756 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
1785 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
1800 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
1801 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
1803 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_nested_exit()
1804 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
1825 u64 cause = vcpu->arch.hfscr >> 56; in kvmppc_handle_nested_exit()
1833 if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || in kvmppc_handle_nested_exit()
1835 vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; in kvmppc_handle_nested_exit()
1842 &vcpu->arch.emul_inst); in kvmppc_handle_nested_exit()
1855 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
1891 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1892 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
1893 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1894 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1906 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
1910 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
1912 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1913 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1917 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1969 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
1999 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
2002 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
2004 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
2027 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
2030 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
2033 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
2036 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
2039 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
2042 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
2045 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
2049 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
2052 *val = get_reg_val(id, vcpu->arch.mmcr[2]); in kvmppc_get_one_reg_hv()
2055 *val = get_reg_val(id, vcpu->arch.mmcra); in kvmppc_get_one_reg_hv()
2058 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
2061 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_get_one_reg_hv()
2065 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
2069 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
2072 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
2075 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
2078 *val = get_reg_val(id, vcpu->arch.sier[0]); in kvmppc_get_one_reg_hv()
2081 *val = get_reg_val(id, vcpu->arch.sier[1]); in kvmppc_get_one_reg_hv()
2084 *val = get_reg_val(id, vcpu->arch.sier[2]); in kvmppc_get_one_reg_hv()
2087 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
2090 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
2099 *val = get_reg_val(id, vcpu->arch.vcore->dpdes | in kvmppc_get_one_reg_hv()
2100 vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
2103 *val = get_reg_val(id, vcpu->arch.vcore->vtb); in kvmppc_get_one_reg_hv()
2106 *val = get_reg_val(id, vcpu->arch.dawr0); in kvmppc_get_one_reg_hv()
2109 *val = get_reg_val(id, vcpu->arch.dawrx0); in kvmppc_get_one_reg_hv()
2112 *val = get_reg_val(id, vcpu->arch.dawr1); in kvmppc_get_one_reg_hv()
2115 *val = get_reg_val(id, vcpu->arch.dawrx1); in kvmppc_get_one_reg_hv()
2118 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
2121 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
2124 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
2127 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
2130 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
2133 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
2136 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
2139 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
2142 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
2145 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2146 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
2147 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2150 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2151 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
2152 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
2153 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2156 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2157 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
2158 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
2159 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2162 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
2166 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
2169 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
2173 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
2176 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
2179 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
2183 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
2191 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
2194 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
2201 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
2204 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
2207 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
2210 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
2213 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
2216 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
2219 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
2222 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
2226 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
2231 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
2234 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
2238 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
2241 *val = get_reg_val(id, vcpu->arch.dec_expires + in kvmppc_get_one_reg_hv()
2242 vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
2245 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
2248 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
2272 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2275 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
2278 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2281 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2284 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2287 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2290 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2294 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2297 vcpu->arch.mmcr[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2300 vcpu->arch.mmcra = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2303 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2306 vcpu->arch.mmcr[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2310 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2314 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2317 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2320 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2323 vcpu->arch.sier[0] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2326 vcpu->arch.sier[1] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2329 vcpu->arch.sier[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2332 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2335 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2338 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2341 vcpu->arch.vcore->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2344 vcpu->arch.dawr0 = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2347 vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
2350 vcpu->arch.dawr1 = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2353 vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
2356 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2358 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
2359 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
2362 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2365 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2368 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2371 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2374 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2377 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2380 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2383 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2388 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2389 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2391 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2397 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2399 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2406 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2409 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2413 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
2423 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2427 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2430 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2433 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2437 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2445 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2448 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2454 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2457 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2460 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2463 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2466 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2469 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2472 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2475 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2479 vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2484 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2487 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2494 vcpu->arch.dec_expires = set_reg_val(id, *val) - in kvmppc_set_one_reg_hv()
2495 vcpu->arch.vcore->tb_offset; in kvmppc_set_one_reg_hv()
2499 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2500 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2501 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2502 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2503 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2506 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
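kvmppc_get_one_reg_hv() and kvmppc_set_one_reg_hv() are mirror-image switches over register IDs: each get arm packs one vcpu->arch field into *val via get_reg_val(), and the corresponding set arm unpacks it via set_reg_val(). The shape, reduced to two hypothetical registers (the real tables additionally gate TM and vector registers on CPU features):

#include <stdint.h>

enum reg_id { REG_DSCR, REG_PURR };

struct vcpu_regs { uint64_t dscr, purr; };

static int get_one_reg(struct vcpu_regs *v, enum reg_id id, uint64_t *val)
{
	switch (id) {
	case REG_DSCR: *val = v->dscr; break;	/* pack vcpu state */
	case REG_PURR: *val = v->purr; break;
	default: return -1;
	}
	return 0;
}

static int set_one_reg(struct vcpu_regs *v, enum reg_id id, const uint64_t *val)
{
	switch (id) {
	case REG_DSCR: v->dscr = *val; break;	/* mirrored unpack */
	case REG_PURR: v->purr = *val; break;
	default: return -1;
	}
	return 0;
}

Keeping the two switches strictly symmetric is the whole contract; a copy-pasted arm that keeps the get direction silently discards userspace writes.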
2543 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2556 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2557 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2558 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2559 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2560 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2683 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
2684 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu, in debugfs_vcpu_init()
2705 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2712 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2714 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2717 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
2718 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
2721 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
2722 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
2723 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
2724 vcpu->arch.shregs.msr = MSR_ME; in kvmppc_core_vcpu_create_hv()
2725 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
2734 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | in kvmppc_core_vcpu_create_hv()
2737 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); in kvmppc_core_vcpu_create_hv()
2740 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2744 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2746 vcpu->arch.hfscr_permitted = vcpu->arch.hfscr; in kvmppc_core_vcpu_create_hv()
2750 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
2752 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
2758 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
2762 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
2766 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
2769 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
2780 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
2781 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2782 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
2783 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
2784 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2795 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
2796 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
2797 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
2798 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
2800 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
2835 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
2836 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
2837 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
2854 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2855 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
2856 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
2857 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
2858 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2872 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
2878 dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now); in kvmppc_set_timer()
2879 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
2880 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
2890 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
2892 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2894 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
2895 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
2896 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
2897 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
2898 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2900 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
2950 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
2959 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); in radix_flush_cpu()
2960 cpu_in_guest = &kvm->arch.cpu_in_guest; in radix_flush_cpu()
2976 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
2984 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
2986 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
3006 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
3008 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
3020 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
3021 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
3022 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
3024 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
3026 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
3027 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest); in kvmppc_start_thread()
3263 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
3264 vcpu->arch.ret = -EINTR; in prepare_threads()
3265 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
3266 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
3267 vcpu->arch.dtl.update_pending) in prepare_threads()
3268 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
3272 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
3286 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { in collect_piggybacks()
3315 if (!vc->kvm->arch.mmu_ready) in recheck_signals_and_mmu()
3318 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
3343 if (now < vcpu->arch.dec_expires && in post_guest_process()
3350 if (vcpu->arch.trap) in post_guest_process()
3352 vcpu->arch.run_task); in post_guest_process()
3354 vcpu->arch.ret = ret; in post_guest_process()
3355 vcpu->arch.trap = 0; in post_guest_process()
3358 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
3359 if (vcpu->arch.pending_exceptions) in post_guest_process()
3361 if (vcpu->arch.ceded) in post_guest_process()
3367 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3383 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3481 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
3505 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3507 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3640 if (!vcpu->arch.ptid) in kvmppc_run_core()
3642 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
3751 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); in kvmppc_run_core()
3775 mtspr(SPRN_DSCR, vcpu->arch.dscr); in load_spr_state()
3776 mtspr(SPRN_IAMR, vcpu->arch.iamr); in load_spr_state()
3777 mtspr(SPRN_PSPB, vcpu->arch.pspb); in load_spr_state()
3778 mtspr(SPRN_FSCR, vcpu->arch.fscr); in load_spr_state()
3779 mtspr(SPRN_TAR, vcpu->arch.tar); in load_spr_state()
3780 mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); in load_spr_state()
3781 mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); in load_spr_state()
3782 mtspr(SPRN_BESCR, vcpu->arch.bescr); in load_spr_state()
3783 mtspr(SPRN_TIDR, vcpu->arch.tid); in load_spr_state()
3784 mtspr(SPRN_AMR, vcpu->arch.amr); in load_spr_state()
3785 mtspr(SPRN_UAMOR, vcpu->arch.uamor); in load_spr_state()
3794 if (!(vcpu->arch.ctrl & 1)) in load_spr_state()
3800 vcpu->arch.ctrl = mfspr(SPRN_CTRLF); in store_spr_state()
3802 vcpu->arch.iamr = mfspr(SPRN_IAMR); in store_spr_state()
3803 vcpu->arch.pspb = mfspr(SPRN_PSPB); in store_spr_state()
3804 vcpu->arch.fscr = mfspr(SPRN_FSCR); in store_spr_state()
3805 vcpu->arch.tar = mfspr(SPRN_TAR); in store_spr_state()
3806 vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); in store_spr_state()
3807 vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); in store_spr_state()
3808 vcpu->arch.bescr = mfspr(SPRN_BESCR); in store_spr_state()
3809 vcpu->arch.tid = mfspr(SPRN_TIDR); in store_spr_state()
3810 vcpu->arch.amr = mfspr(SPRN_AMR); in store_spr_state()
3811 vcpu->arch.uamor = mfspr(SPRN_UAMOR); in store_spr_state()
3812 vcpu->arch.dscr = mfspr(SPRN_DSCR); in store_spr_state()
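load_spr_state() and store_spr_state() are likewise mirrored: every mtspr() on guest entry has a matching mfspr() on exit targeting the same vcpu->arch field, so guest SPR values survive round trips through the host. A toy version in which an array models the CPU's SPR file; the real mtspr/mfspr are ppc instructions, not C functions:

#include <stdint.h>

enum { SPRN_DSCR, SPRN_AMR, NR_SPRS };

static uint64_t spr_file[NR_SPRS];	/* models the CPU's SPRs */

static void mtspr(int sprn, uint64_t val) { spr_file[sprn] = val; }
static uint64_t mfspr(int sprn)           { return spr_file[sprn]; }

struct vcpu_sprs { uint64_t dscr, amr; };

static void load_spr_state(const struct vcpu_sprs *v)
{
	mtspr(SPRN_DSCR, v->dscr);	/* guest values into the CPU */
	mtspr(SPRN_AMR, v->amr);
}

static void store_spr_state(struct vcpu_sprs *v)
{
	v->dscr = mfspr(SPRN_DSCR);	/* CPU values back into the vcpu */
	v->amr = mfspr(SPRN_AMR);
}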
3846 if (host_os_sprs->amr != vcpu->arch.amr) in restore_p9_host_os_sprs()
3849 if (host_os_sprs->fscr != vcpu->arch.fscr) in restore_p9_host_os_sprs()
3853 if (!(vcpu->arch.ctrl & 1)) in restore_p9_host_os_sprs()
3869 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmhv_p9_guest_entry()
3875 WARN_ON_ONCE(vcpu->arch.ceded); in kvmhv_p9_guest_entry()
3894 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3895 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3898 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
3903 kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true); in kvmhv_p9_guest_entry()
3908 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3909 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3920 load_fp_state(&vcpu->arch.fp); in kvmhv_p9_guest_entry()
3922 load_vr_state(&vcpu->arch.vr); in kvmhv_p9_guest_entry()
3924 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvmhv_p9_guest_entry()
3940 mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); in kvmhv_p9_guest_entry()
3954 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_p9_guest_entry()
3957 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_p9_guest_entry()
3959 if (vcpu->arch.nested) { in kvmhv_p9_guest_entry()
3960 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_p9_guest_entry()
3961 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_p9_guest_entry()
3963 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_p9_guest_entry()
3967 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_p9_guest_entry()
3968 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_p9_guest_entry()
3970 __pa(&vcpu->arch.regs)); in kvmhv_p9_guest_entry()
3972 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_p9_guest_entry()
3973 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_p9_guest_entry()
3974 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_p9_guest_entry()
3975 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_p9_guest_entry()
3979 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && in kvmhv_p9_guest_entry()
3988 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && in kvmhv_p9_guest_entry()
3989 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmhv_p9_guest_entry()
4013 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
4020 vcpu->arch.dec_expires = dec + tb; in kvmhv_p9_guest_entry()
4022 vcpu->arch.thread_cpu = -1; in kvmhv_p9_guest_entry()
4029 store_fp_state(&vcpu->arch.fp); in kvmhv_p9_guest_entry()
4031 store_vr_state(&vcpu->arch.vr); in kvmhv_p9_guest_entry()
4033 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvmhv_p9_guest_entry()
4037 kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true); in kvmhv_p9_guest_entry()
4040 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
4041 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
4044 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
4084 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
4085 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
4090 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
4116 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
4117 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
4128 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
4145 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcore_check_block()
4273 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4274 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
4280 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
4283 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4297 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
4298 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
4304 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4306 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
4307 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
4308 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
4309 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
4310 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
4311 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
4332 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4335 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4343 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4357 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
4361 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
4362 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4365 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4370 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
4372 v->arch.ceded = 0; in kvmppc_run_vcpu()
4389 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4398 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4402 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4409 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4414 return vcpu->arch.ret; in kvmppc_run_vcpu()
4425 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4430 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4431 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4433 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4434 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4435 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4436 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmhv_run_single_vcpu()
4437 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4438 vcpu->arch.busy_preempt = TB_NIL; in kvmhv_run_single_vcpu()
4439 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4445 if (!kvm->arch.mmu_ready) { in kvmhv_run_single_vcpu()
4450 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4473 if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4478 if (vcpu->arch.doorbell_request) { in kvmhv_run_single_vcpu()
4481 vcpu->arch.doorbell_request = 0; in kvmhv_run_single_vcpu()
4484 &vcpu->arch.pending_exceptions)) in kvmhv_run_single_vcpu()
4486 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4487 vcpu->arch.doorbell_request || in kvmhv_run_single_vcpu()
4489 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4514 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
4543 cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); in kvmhv_run_single_vcpu()
4553 ((get_tb() < vcpu->arch.dec_expires) || in kvmhv_run_single_vcpu()
4566 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4568 if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && in kvmhv_run_single_vcpu()
4571 while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { in kvmhv_run_single_vcpu()
4575 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4583 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4592 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
4597 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4614 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
4646 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
4647 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
4648 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
4660 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4675 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
4676 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
4677 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
4682 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
4687 if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
4704 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
4723 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
4724 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4834 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4835 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4836 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4837 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4852 vfree(slot->arch.rmap); in kvmppc_core_free_memslot_hv()
4853 slot->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
4864 slot->arch.rmap = vzalloc(array_size(npages, in kvmppc_core_prepare_memory_region_hv()
4865 sizeof(*slot->arch.rmap))); in kvmppc_core_prepare_memory_region_hv()
4866 if (!slot->arch.rmap) in kvmppc_core_prepare_memory_region_hv()
4888 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
4909 if (!kvm->arch.secure_guest) in kvmppc_core_commit_memory_region_hv()
4939 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
4942 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
4945 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
4953 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
4964 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
4965 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
4967 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
4970 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
4973 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
4974 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
4976 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
4995 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
5044 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
5078 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
5081 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
5103 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
5105 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
5197 mutex_init(&kvm->arch.uvmem_lock); in kvmppc_core_init_vm_hv()
5198 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); in kvmppc_core_init_vm_hv()
5199 mutex_init(&kvm->arch.mmu_setup_lock); in kvmppc_core_init_vm_hv()
5206 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
5220 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
5223 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
5224 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
5227 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
5231 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
5232 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5239 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
5267 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
5268 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
5273 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
5280 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
5283 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
5293 kvm->arch.tlb_sets = 1; in kvmppc_core_init_vm_hv()
5295 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
5297 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
5299 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
5301 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
5318 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
5320 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
5321 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
5327 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); in kvmppc_core_init_vm_hv()
5340 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
5341 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
5346 debugfs_remove_recursive(kvm->arch.debugfs_dir); in kvmppc_core_destroy_vm_hv()
5357 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
5363 kvm->arch.process_table = 0; in kvmppc_core_destroy_vm_hv()
5364 if (kvm->arch.secure_guest) in kvmppc_core_destroy_vm_hv()
5365 uv_svm_terminate(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5366 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); in kvmppc_core_destroy_vm_hv()
5369 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5410 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
5436 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
5444 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
5528 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
5531 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
5741 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5743 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
5744 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
5747 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
5748 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
5761 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
5769 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5784 kvm->arch.nested_enable = true; in kvmhv_enable_nested()
5801 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
5820 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
5846 kvm->arch.svm_enabled = 1; in kvmhv_enable_svm()
5866 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmhv_svm_off()
5869 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
5870 mmu_was_ready = kvm->arch.mmu_ready; in kvmhv_svm_off()
5871 if (kvm->arch.mmu_ready) { in kvmhv_svm_off()
5872 kvm->arch.mmu_ready = 0; in kvmhv_svm_off()
5875 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_svm_off()
5876 kvm->arch.mmu_ready = 1; in kvmhv_svm_off()
5892 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in kvmhv_svm_off()
5897 ret = uv_svm_terminate(kvm->arch.lpid); in kvmhv_svm_off()
5914 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
5915 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
5916 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
5917 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
5918 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
5922 kvm->arch.secure_guest = 0; in kvmhv_svm_off()
5923 kvm->arch.mmu_ready = mmu_was_ready; in kvmhv_svm_off()
5925 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
5936 kvm->arch.dawr1_enabled = true; in kvmhv_enable_dawr1()