Lines Matching refs:kvm

46 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
52 struct kvm *kvm; member
117 void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) in kvmppc_set_hpt() argument
119 atomic64_set(&kvm->arch.mmio_update, 0); in kvmppc_set_hpt()
120 kvm->arch.hpt = *info; in kvmppc_set_hpt()
121 kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); in kvmppc_set_hpt()
124 info->virt, (long)info->order, kvm->arch.lpid); in kvmppc_set_hpt()
127 long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) in kvmppc_alloc_reset_hpt() argument
132 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_alloc_reset_hpt()
133 if (kvm->arch.mmu_ready) { in kvmppc_alloc_reset_hpt()
134 kvm->arch.mmu_ready = 0; in kvmppc_alloc_reset_hpt()
137 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmppc_alloc_reset_hpt()
138 kvm->arch.mmu_ready = 1; in kvmppc_alloc_reset_hpt()
142 if (kvm_is_radix(kvm)) { in kvmppc_alloc_reset_hpt()
143 err = kvmppc_switch_mmu_to_hpt(kvm); in kvmppc_alloc_reset_hpt()
148 if (kvm->arch.hpt.order == order) { in kvmppc_alloc_reset_hpt()
152 memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); in kvmppc_alloc_reset_hpt()
156 kvmppc_rmap_reset(kvm); in kvmppc_alloc_reset_hpt()
161 if (kvm->arch.hpt.virt) { in kvmppc_alloc_reset_hpt()
162 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_alloc_reset_hpt()
163 kvmppc_rmap_reset(kvm); in kvmppc_alloc_reset_hpt()
169 kvmppc_set_hpt(kvm, &info); in kvmppc_alloc_reset_hpt()
174 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_alloc_reset_hpt()
176 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_alloc_reset_hpt()
216 struct kvm *kvm = vcpu->kvm; in kvmppc_map_vrma() local
225 if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) in kvmppc_map_vrma()
226 npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; in kvmppc_map_vrma()
237 & kvmppc_hpt_mask(&kvm->arch.hpt); in kvmppc_map_vrma()
247 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r, in kvmppc_map_vrma()
283 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, in kvmppc_virtmode_do_h_enter() argument
290 ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, in kvmppc_virtmode_do_h_enter()
291 kvm->mm->pgd, false, pte_idx_ret); in kvmppc_virtmode_do_h_enter()
335 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_book3s_64_hv_xlate() local
344 if (kvm_is_radix(vcpu->kvm)) in kvmppc_mmu_book3s_64_hv_xlate()
355 slb_v = vcpu->kvm->arch.vrma_slb_v; in kvmppc_mmu_book3s_64_hv_xlate()
360 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, in kvmppc_mmu_book3s_64_hv_xlate()
366 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_mmu_book3s_64_hv_xlate()
370 gr = kvm->arch.hpt.rev[index].guest_rpte; in kvmppc_mmu_book3s_64_hv_xlate()
433 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_hv_emulate_mmio()
436 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_hv_emulate_mmio()
486 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_hv_page_fault() local
505 if (kvm_is_radix(kvm)) in kvmppc_book3s_hv_page_fault()
518 mmio_update = atomic64_read(&kvm->arch.mmio_update); in kvmppc_book3s_hv_page_fault()
531 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_book3s_hv_page_fault()
532 rev = &kvm->arch.hpt.rev[index]; in kvmppc_book3s_hv_page_fault()
556 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
573 mmu_seq = kvm->mmu_notifier_seq; in kvmppc_book3s_hv_page_fault()
609 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
610 ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); in kvmppc_book3s_hv_page_fault()
614 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
673 if (!kvm->arch.mmu_ready) in kvmppc_book3s_hv_page_fault()
688 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) { in kvmppc_book3s_hv_page_fault()
701 kvmppc_invalidate_hpte(kvm, hptep, index); in kvmppc_book3s_hv_page_fault()
705 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); in kvmppc_book3s_hv_page_fault()
733 void kvmppc_rmap_reset(struct kvm *kvm) in kvmppc_rmap_reset() argument
739 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_rmap_reset()
740 slots = kvm_memslots(kvm); in kvmppc_rmap_reset()
743 spin_lock(&kvm->mmu_lock); in kvmppc_rmap_reset()
750 spin_unlock(&kvm->mmu_lock); in kvmppc_rmap_reset()
752 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_rmap_reset()
756 static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, in kvmppc_unmap_hpte() argument
760 __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvmppc_unmap_hpte()
761 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvmppc_unmap_hpte()
784 kvmppc_invalidate_hpte(kvm, hptep, i); in kvmppc_unmap_hpte()
793 note_hpte_modification(kvm, &rev[i]); in kvmppc_unmap_hpte()
798 static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_unmap_rmapp() argument
819 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_unmap_rmapp()
828 kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn); in kvm_unmap_rmapp()
834 bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range_hv() argument
838 if (kvm_is_radix(kvm)) { in kvm_unmap_gfn_range_hv()
840 kvm_unmap_radix(kvm, range->slot, gfn); in kvm_unmap_gfn_range_hv()
843 kvm_unmap_rmapp(kvm, range->slot, gfn); in kvm_unmap_gfn_range_hv()
849 void kvmppc_core_flush_memslot_hv(struct kvm *kvm, in kvmppc_core_flush_memslot_hv() argument
858 if (kvm_is_radix(kvm)) { in kvmppc_core_flush_memslot_hv()
859 kvmppc_radix_flush_memslot(kvm, memslot); in kvmppc_core_flush_memslot_hv()
871 kvm_unmap_rmapp(kvm, memslot, gfn); in kvmppc_core_flush_memslot_hv()
876 static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_age_rmapp() argument
879 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_age_rmapp()
899 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_age_rmapp()
917 kvmppc_clear_ref_hpte(kvm, hptep, i); in kvm_age_rmapp()
920 note_hpte_modification(kvm, &rev[i]); in kvm_age_rmapp()
931 bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn_hv() argument
936 if (kvm_is_radix(kvm)) { in kvm_age_gfn_hv()
938 ret |= kvm_age_radix(kvm, range->slot, gfn); in kvm_age_gfn_hv()
941 ret |= kvm_age_rmapp(kvm, range->slot, gfn); in kvm_age_gfn_hv()
947 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_test_age_rmapp() argument
950 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_age_rmapp()
967 hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); in kvm_test_age_rmapp()
980 bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn_hv() argument
984 if (kvm_is_radix(kvm)) in kvm_test_age_gfn_hv()
985 return kvm_test_age_radix(kvm, range->slot, range->start); in kvm_test_age_gfn_hv()
987 return kvm_test_age_rmapp(kvm, range->slot, range->start); in kvm_test_age_gfn_hv()
990 bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_set_spte_gfn_hv() argument
994 if (kvm_is_radix(kvm)) in kvm_set_spte_gfn_hv()
995 kvm_unmap_radix(kvm, range->slot, range->start); in kvm_set_spte_gfn_hv()
997 kvm_unmap_rmapp(kvm, range->slot, range->start); in kvm_set_spte_gfn_hv()
1002 static int vcpus_running(struct kvm *kvm) in vcpus_running() argument
1004 return atomic_read(&kvm->arch.vcpus_running) != 0; in vcpus_running()
1011 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) in kvm_test_clear_dirty_npages() argument
1013 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_clear_dirty_npages()
1030 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_test_clear_dirty_npages()
1049 (!hpte_is_writable(hptep1) || vcpus_running(kvm))) in kvm_test_clear_dirty_npages()
1068 kvmppc_invalidate_hpte(kvm, hptep, i); in kvm_test_clear_dirty_npages()
1075 note_hpte_modification(kvm, &rev[i]); in kvm_test_clear_dirty_npages()
1110 long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, in kvmppc_hv_get_dirty_log_hpt() argument
1119 int npages = kvm_test_clear_dirty_npages(kvm, rmapp); in kvmppc_hv_get_dirty_log_hpt()
1133 void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, in kvmppc_pin_guest_page() argument
1143 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_pin_guest_page()
1144 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_pin_guest_page()
1152 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_pin_guest_page()
1160 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_pin_guest_page()
1164 void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa, in kvmppc_unpin_guest_page() argument
1179 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_unpin_guest_page()
1180 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unpin_guest_page()
1183 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_unpin_guest_page()
1206 struct kvm *kvm = resize->kvm; in resize_hpt_rehash_hpte() local
1207 struct kvm_hpt_info *old = &kvm->arch.hpt; in resize_hpt_rehash_hpte()
1254 int srcu_idx = srcu_read_lock(&kvm->srcu); in resize_hpt_rehash_hpte()
1256 __gfn_to_memslot(kvm_memslots(kvm), gfn); in resize_hpt_rehash_hpte()
1263 kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn); in resize_hpt_rehash_hpte()
1267 srcu_read_unlock(&kvm->srcu, srcu_idx); in resize_hpt_rehash_hpte()
1360 struct kvm *kvm = resize->kvm; in resize_hpt_rehash() local
1364 for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { in resize_hpt_rehash()
1375 struct kvm *kvm = resize->kvm; in resize_hpt_pivot() local
1383 spin_lock(&kvm->mmu_lock); in resize_hpt_pivot()
1386 hpt_tmp = kvm->arch.hpt; in resize_hpt_pivot()
1387 kvmppc_set_hpt(kvm, &resize->hpt); in resize_hpt_pivot()
1390 spin_unlock(&kvm->mmu_lock); in resize_hpt_pivot()
1392 synchronize_srcu_expedited(&kvm->srcu); in resize_hpt_pivot()
1395 kvmppc_setup_partition_table(kvm); in resize_hpt_pivot()
1400 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) in resize_hpt_release() argument
1402 if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock))) in resize_hpt_release()
1414 if (kvm->arch.resize_hpt == resize) in resize_hpt_release()
1415 kvm->arch.resize_hpt = NULL; in resize_hpt_release()
1423 struct kvm *kvm = resize->kvm; in resize_hpt_prepare_work() local
1429 mutex_lock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1432 if (kvm->arch.resize_hpt == resize) { in resize_hpt_prepare_work()
1436 mutex_unlock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1449 mutex_lock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1457 if (kvm->arch.resize_hpt != resize) in resize_hpt_prepare_work()
1458 resize_hpt_release(kvm, resize); in resize_hpt_prepare_work()
1460 mutex_unlock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1463 long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, in kvm_vm_ioctl_resize_hpt_prepare() argument
1471 if (flags != 0 || kvm_is_radix(kvm)) in kvm_vm_ioctl_resize_hpt_prepare()
1477 mutex_lock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_prepare()
1479 resize = kvm->arch.resize_hpt; in kvm_vm_ioctl_resize_hpt_prepare()
1488 resize_hpt_release(kvm, resize); in kvm_vm_ioctl_resize_hpt_prepare()
1494 resize_hpt_release(kvm, resize); in kvm_vm_ioctl_resize_hpt_prepare()
1511 resize->kvm = kvm; in kvm_vm_ioctl_resize_hpt_prepare()
1513 kvm->arch.resize_hpt = resize; in kvm_vm_ioctl_resize_hpt_prepare()
1520 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_prepare()
1529 long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, in kvm_vm_ioctl_resize_hpt_commit() argument
1537 if (flags != 0 || kvm_is_radix(kvm)) in kvm_vm_ioctl_resize_hpt_commit()
1543 mutex_lock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_commit()
1545 resize = kvm->arch.resize_hpt; in kvm_vm_ioctl_resize_hpt_commit()
1549 if (WARN_ON(!kvm->arch.mmu_ready)) in kvm_vm_ioctl_resize_hpt_commit()
1553 kvm->arch.mmu_ready = 0; in kvm_vm_ioctl_resize_hpt_commit()
1576 kvm->arch.mmu_ready = 1; in kvm_vm_ioctl_resize_hpt_commit()
1579 resize_hpt_release(kvm, resize); in kvm_vm_ioctl_resize_hpt_commit()
1580 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_commit()
1603 struct kvm *kvm; member
1706 struct kvm *kvm = ctx->kvm; in kvm_htab_read() local
1719 if (kvm_is_radix(kvm)) in kvm_htab_read()
1726 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_read()
1727 revp = kvm->arch.hpt.rev + i; in kvm_htab_read()
1742 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1752 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1768 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1789 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { in kvm_htab_read()
1805 struct kvm *kvm = ctx->kvm; in kvm_htab_write() local
1819 if (kvm_is_radix(kvm)) in kvm_htab_write()
1823 mutex_lock(&kvm->arch.mmu_setup_lock); in kvm_htab_write()
1824 mmu_ready = kvm->arch.mmu_ready; in kvm_htab_write()
1826 kvm->arch.mmu_ready = 0; /* temporarily */ in kvm_htab_write()
1829 if (atomic_read(&kvm->arch.vcpus_running)) { in kvm_htab_write()
1830 kvm->arch.mmu_ready = 1; in kvm_htab_write()
1831 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_htab_write()
1851 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || in kvm_htab_write()
1852 i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) in kvm_htab_write()
1855 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_write()
1877 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); in kvm_htab_write()
1879 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, in kvm_htab_write()
1890 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvm_htab_write()
1894 kvmppc_update_lpcr(kvm, lpcr, in kvm_htab_write()
1897 kvmppc_setup_partition_table(kvm); in kvm_htab_write()
1907 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); in kvm_htab_write()
1917 kvm->arch.mmu_ready = mmu_ready; in kvm_htab_write()
1918 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_htab_write()
1931 atomic_dec(&ctx->kvm->arch.hpte_mod_interest); in kvm_htab_release()
1932 kvm_put_kvm(ctx->kvm); in kvm_htab_release()
1944 int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) in kvm_vm_ioctl_get_htab_fd() argument
1956 kvm_get_kvm(kvm); in kvm_vm_ioctl_get_htab_fd()
1957 ctx->kvm = kvm; in kvm_vm_ioctl_get_htab_fd()
1966 kvm_put_kvm_no_destroy(kvm); in kvm_vm_ioctl_get_htab_fd()
1971 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_htab_fd()
1972 atomic_inc(&kvm->arch.hpte_mod_interest); in kvm_vm_ioctl_get_htab_fd()
1974 synchronize_srcu_expedited(&kvm->srcu); in kvm_vm_ioctl_get_htab_fd()
1975 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_htab_fd()
1982 struct kvm *kvm; member
1992 struct kvm *kvm = inode->i_private; in debugfs_htab_open() local
1999 kvm_get_kvm(kvm); in debugfs_htab_open()
2000 p->kvm = kvm; in debugfs_htab_open()
2011 kvm_put_kvm(p->kvm); in debugfs_htab_release()
2023 struct kvm *kvm; in debugfs_htab_read() local
2026 kvm = p->kvm; in debugfs_htab_read()
2027 if (kvm_is_radix(kvm)) in debugfs_htab_read()
2053 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in debugfs_htab_read()
2054 for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); in debugfs_htab_read()
2065 gr = kvm->arch.hpt.rev[i].guest_rpte; in debugfs_htab_read()
2113 void kvmppc_mmu_debugfs_init(struct kvm *kvm) in kvmppc_mmu_debugfs_init() argument
2115 debugfs_create_file("htab", 0400, kvm->arch.debugfs_dir, kvm, in kvmppc_mmu_debugfs_init()
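
The addressing pattern that recurs throughout these references — hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)) paired with kvm->arch.hpt.rev[index].guest_rpte — reflects that each hashed page table entry (HPTE) occupies 16 bytes (HPTE_SIZE, two 64-bit doublewords) and that the reverse-map array is indexed in parallel with the HPT itself. The sketch below is a minimal, self-contained model of that layout, not the kernel's code: struct hpt_info_model, struct revmap_entry_model, hpt_npte() and hpte_addr() are illustrative stand-ins assumed here for the kernel's kvm_hpt_info, revmap_entry, kvmppc_hpt_npte() and the open-coded address arithmetic, relying only on what the listing shows (order is the log2 byte size of the table, cf. the memset of the table with 1ul << order above, and entries are 16 bytes, cf. the interchangeable use of i << 4 and i * HPTE_SIZE).

#include <stdint.h>

#define HPTE_SIZE 16                 /* one HPTE: two 64-bit doublewords (v, r) */

/* Illustrative stand-in for the kernel's struct revmap_entry (cf. rev[i].guest_rpte). */
struct revmap_entry_model {
        uint64_t guest_rpte;             /* guest view of the HPTE's second doubleword */
};

/* Illustrative stand-in for the kernel's struct kvm_hpt_info. */
struct hpt_info_model {
        unsigned long virt;              /* base address of the hashed page table */
        struct revmap_entry_model *rev;  /* reverse-map array, one entry per HPTE */
        int order;                       /* log2 of the table size in bytes */
};

/* Number of HPTEs in a table of 2^order bytes, at 16 bytes per entry. */
static inline unsigned long hpt_npte(const struct hpt_info_model *hpt)
{
        return 1UL << (hpt->order - 4);
}

/* Address of entry i: base + i * HPTE_SIZE, the (i << 4) seen in the listing. */
static inline uint64_t *hpte_addr(const struct hpt_info_model *hpt, unsigned long i)
{
        return (uint64_t *)(hpt->virt + (i << 4));
}
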