Lines matching refs: kvm
123 static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr, in stage2_get_leaf_entry() argument
130 ptep = (pte_t *)kvm->arch.pgd; in stage2_get_leaf_entry()
152 static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr) in stage2_remote_tlb_flush() argument
156 struct kvm_vmid *vmid = &kvm->arch.vmid; in stage2_remote_tlb_flush()
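These lines come from the KVM RISC-V stage-2 MMU code (arch/riscv/kvm/mmu.c, from kernels that still used the "stage2" naming). The index only shows the lines that mention kvm, so each sketch below reconstructs the surrounding body under stated assumptions rather than quoting it. For stage2_remote_tlb_flush(), a minimal sketch assuming a stage2_level_to_page_size() helper and the SBI remote-fence call of that era (whose hart-mask argument has changed type across kernel versions):

static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
        unsigned long size = PAGE_SIZE;
        struct kvm_vmid *vmid = &kvm->arch.vmid;

        /* Translate the page-table level into a flush granule. */
        if (stage2_level_to_page_size(level, &size))
                return;
        addr &= ~(size - 1);

        /* Remote HFENCE.GVMA for this VMID on all online CPUs. */
        preempt_disable();
        sbi_remote_hfence_gvma_vmid(cpumask_bits(cpu_online_mask), addr,
                                    size, READ_ONCE(vmid->vmid));
        preempt_enable();
}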
173 static int stage2_set_pte(struct kvm *kvm, u32 level, in stage2_set_pte() argument
178 pte_t *next_ptep = (pte_t *)kvm->arch.pgd; in stage2_set_pte()
206 stage2_remote_tlb_flush(kvm, current_level, addr); in stage2_set_pte()
211 static int stage2_map_page(struct kvm *kvm, in stage2_map_page() argument
253 return stage2_set_pte(kvm, level, pcache, gpa, &new_pte); in stage2_map_page()
262 static void stage2_op_pte(struct kvm *kvm, gpa_t addr, in stage2_op_pte() argument
290 stage2_op_pte(kvm, addr + i * next_page_size, in stage2_op_pte()
299 stage2_remote_tlb_flush(kvm, ptep_level, addr); in stage2_op_pte()
303 static void stage2_unmap_range(struct kvm *kvm, gpa_t start, in stage2_unmap_range() argument
314 found_leaf = stage2_get_leaf_entry(kvm, addr, in stage2_unmap_range()
324 stage2_op_pte(kvm, addr, ptep, in stage2_unmap_range()
335 cond_resched_lock(&kvm->mmu_lock); in stage2_unmap_range()
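stage2_unmap_range() walks the guest-physical range leaf by leaf, clears present leaves, and periodically drops kvm->mmu_lock when called from a context that may block. A sketch assuming the stage2_level_to_page_size() helper and a STAGE2_OP_CLEAR operation code:

static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
                               gpa_t size, bool may_block)
{
        int ret;
        pte_t *ptep;
        u32 ptep_level;
        bool found_leaf;
        unsigned long page_size;
        gpa_t addr = start, end = start + size;

        while (addr < end) {
                found_leaf = stage2_get_leaf_entry(kvm, addr,
                                                   &ptep, &ptep_level);
                ret = stage2_level_to_page_size(ptep_level, &page_size);
                if (ret)
                        break;

                /* Only clear leaves that are fully covered by the range. */
                if (found_leaf &&
                    !(addr & (page_size - 1)) && (end - addr) >= page_size)
                        stage2_op_pte(kvm, addr, ptep,
                                      ptep_level, STAGE2_OP_CLEAR);

                addr += page_size;

                /* Avoid holding mmu_lock across a huge range. */
                if (may_block && addr < end)
                        cond_resched_lock(&kvm->mmu_lock);
        }
}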
339 static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end) in stage2_wp_range() argument
349 found_leaf = stage2_get_leaf_entry(kvm, addr, in stage2_wp_range()
359 stage2_op_pte(kvm, addr, ptep, in stage2_wp_range()
367 static void stage2_wp_memory_region(struct kvm *kvm, int slot) in stage2_wp_memory_region() argument
369 struct kvm_memslots *slots = kvm_memslots(kvm); in stage2_wp_memory_region()
374 spin_lock(&kvm->mmu_lock); in stage2_wp_memory_region()
375 stage2_wp_range(kvm, start, end); in stage2_wp_memory_region()
376 spin_unlock(&kvm->mmu_lock); in stage2_wp_memory_region()
377 kvm_flush_remote_tlbs(kvm); in stage2_wp_memory_region()
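stage2_wp_memory_region() is essentially a locked wrapper around stage2_wp_range() for one memslot, followed by a remote TLB flush. A sketch; the start/end computation from base_gfn/npages is assumed from the usual memslot layout:

static void stage2_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
        phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

        /* Write-protect the whole slot under mmu_lock, then flush TLBs. */
        spin_lock(&kvm->mmu_lock);
        stage2_wp_range(kvm, start, end);
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
}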
380 static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, in stage2_ioremap() argument
404 spin_lock(&kvm->mmu_lock); in stage2_ioremap()
405 ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte); in stage2_ioremap()
406 spin_unlock(&kvm->mmu_lock); in stage2_ioremap()
418 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
427 stage2_wp_range(kvm, start, end); in kvm_arch_mmu_enable_log_dirty_pt_masked()
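kvm_arch_mmu_enable_log_dirty_pt_masked() follows the same pattern as the other KVM architectures: turn the dirty-bitmap mask into a GPA range and write-protect it. A sketch assuming that conventional start/end computation:

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset,
                                             unsigned long mask)
{
        phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
        phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
        phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

        /* Write-protect only the pages covered by the dirty mask. */
        stage2_wp_range(kvm, start, end);
}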
430 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
434 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, in kvm_arch_flush_remote_tlbs_memslot() argument
437 kvm_flush_remote_tlbs(kvm); in kvm_arch_flush_remote_tlbs_memslot()
440 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free) in kvm_arch_free_memslot() argument
444 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) in kvm_arch_memslots_updated() argument
448 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
450 kvm_riscv_stage2_free_pgd(kvm); in kvm_arch_flush_shadow_all()
453 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
459 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
460 stage2_unmap_range(kvm, gpa, size, false); in kvm_arch_flush_shadow_memslot()
461 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
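kvm_arch_flush_shadow_memslot() unmaps one memslot's GPA range under mmu_lock; a sketch assuming the usual base_gfn/npages arithmetic:

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        stage2_unmap_range(kvm, gpa, size, false);
        spin_unlock(&kvm->mmu_lock);
}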
464 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
476 stage2_wp_memory_region(kvm, mem->slot); in kvm_arch_commit_memory_region()
479 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
549 ret = stage2_ioremap(kvm, gpa, pa, in kvm_arch_prepare_memory_region()
560 spin_lock(&kvm->mmu_lock); in kvm_arch_prepare_memory_region()
562 stage2_unmap_range(kvm, mem->guest_phys_addr, in kvm_arch_prepare_memory_region()
564 spin_unlock(&kvm->mmu_lock); in kvm_arch_prepare_memory_region()
571 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range() argument
573 if (!kvm->arch.pgd) in kvm_unmap_gfn_range()
576 stage2_unmap_range(kvm, range->start << PAGE_SHIFT, in kvm_unmap_gfn_range()
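The MMU-notifier hook kvm_unmap_gfn_range() simply forwards the GFN range to stage2_unmap_range(); a sketch (forwarding range->may_block is an assumption):

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        if (!kvm->arch.pgd)
                return false;

        stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
                           (range->end - range->start) << PAGE_SHIFT,
                           range->may_block);

        /* stage2_unmap_range() already flushed, so no extra flush needed. */
        return false;
}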
582 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_set_spte_gfn() argument
587 if (!kvm->arch.pgd) in kvm_set_spte_gfn()
592 ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT, in kvm_set_spte_gfn()
602 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn() argument
608 if (!kvm->arch.pgd) in kvm_age_gfn()
613 if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_age_gfn()
620 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn() argument
626 if (!kvm->arch.pgd) in kvm_test_age_gfn()
631 if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_test_age_gfn()
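kvm_age_gfn() and kvm_test_age_gfn() both look up the leaf PTE for the GFN and then either clear or merely test its accessed bit. A sketch assuming ptep_test_and_clear_young() tolerates a NULL vma, as the generic/RISC-V implementation does:

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        pte_t *ptep;
        u32 ptep_level = 0;

        if (!kvm->arch.pgd)
                return false;

        if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
                                   &ptep, &ptep_level))
                return false;

        /* Clear and report the accessed bit of the leaf PTE. */
        return ptep_test_and_clear_young(NULL, 0, ptep);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        pte_t *ptep;
        u32 ptep_level = 0;

        if (!kvm->arch.pgd)
                return false;

        if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
                                   &ptep, &ptep_level))
                return false;

        /* Only report the accessed bit, do not clear it. */
        return pte_young(*ptep);
}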
648 struct kvm *kvm = vcpu->kvm; in kvm_riscv_stage2_map() local
691 mmu_seq = kvm->mmu_notifier_seq; in kvm_riscv_stage2_map()
693 hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable); in kvm_riscv_stage2_map()
709 spin_lock(&kvm->mmu_lock); in kvm_riscv_stage2_map()
711 if (mmu_notifier_retry(kvm, mmu_seq)) in kvm_riscv_stage2_map()
716 mark_page_dirty(kvm, gfn); in kvm_riscv_stage2_map()
717 ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_stage2_map()
720 ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_stage2_map()
728 spin_unlock(&kvm->mmu_lock); in kvm_riscv_stage2_map()
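kvm_riscv_stage2_map() is the guest-fault path. The lines above show its core synchronization: snapshot mmu_notifier_seq before translating the GFN, then take mmu_lock and bail out via mmu_notifier_retry() if an invalidation raced in between. An abridged sketch of that middle section (hugepage adjustment, page-cache top-up and error handling omitted; names such as vma_pagesize are assumptions):

        /* Snapshot the notifier sequence before the GFN translation. */
        mmu_seq = kvm->mmu_notifier_seq;

        hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
        if (is_error_noslot_pfn(hfn))
                return -EFAULT;

        spin_lock(&kvm->mmu_lock);

        /* An invalidation ran since the snapshot: retry the fault. */
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;

        if (writeable) {
                kvm_set_pfn_dirty(hfn);
                mark_page_dirty(kvm, gfn);
                ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
                                      vma_pagesize, false, true);
        } else {
                ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
                                      vma_pagesize, true, true);
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_set_pfn_accessed(hfn);
        kvm_release_pfn_clean(hfn);
        return ret;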
739 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm) in kvm_riscv_stage2_alloc_pgd() argument
743 if (kvm->arch.pgd != NULL) { in kvm_riscv_stage2_alloc_pgd()
752 kvm->arch.pgd = page_to_virt(pgd_page); in kvm_riscv_stage2_alloc_pgd()
753 kvm->arch.pgd_phys = page_to_phys(pgd_page); in kvm_riscv_stage2_alloc_pgd()
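kvm_riscv_stage2_alloc_pgd() allocates the stage-2 PGD (its size depends on the translation mode) and records both its virtual and physical address in kvm->arch. A sketch assuming a stage2_pgd_size variable for that mode-dependent size:

int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
{
        struct page *pgd_page;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                               get_order(stage2_pgd_size));
        if (!pgd_page)
                return -ENOMEM;
        kvm->arch.pgd = page_to_virt(pgd_page);
        kvm->arch.pgd_phys = page_to_phys(pgd_page);

        return 0;
}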
758 void kvm_riscv_stage2_free_pgd(struct kvm *kvm) in kvm_riscv_stage2_free_pgd() argument
762 spin_lock(&kvm->mmu_lock); in kvm_riscv_stage2_free_pgd()
763 if (kvm->arch.pgd) { in kvm_riscv_stage2_free_pgd()
764 stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false); in kvm_riscv_stage2_free_pgd()
765 pgd = READ_ONCE(kvm->arch.pgd); in kvm_riscv_stage2_free_pgd()
766 kvm->arch.pgd = NULL; in kvm_riscv_stage2_free_pgd()
767 kvm->arch.pgd_phys = 0; in kvm_riscv_stage2_free_pgd()
769 spin_unlock(&kvm->mmu_lock); in kvm_riscv_stage2_free_pgd()
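kvm_riscv_stage2_free_pgd() tears down the entire stage-2 table under mmu_lock and frees the PGD pages after dropping it. A sketch assuming stage2_gpa_size/stage2_pgd_size hold the mode-dependent sizes:

void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
{
        void *pgd = NULL;

        spin_lock(&kvm->mmu_lock);
        if (kvm->arch.pgd) {
                /* Unmap the whole guest physical address space. */
                stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
                pgd = READ_ONCE(kvm->arch.pgd);
                kvm->arch.pgd = NULL;
                kvm->arch.pgd_phys = 0;
        }
        spin_unlock(&kvm->mmu_lock);

        /* Free the PGD pages outside the lock. */
        if (pgd)
                free_pages((unsigned long)pgd, get_order(stage2_pgd_size));
}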
778 struct kvm_arch *k = &vcpu->kvm->arch; in kvm_riscv_stage2_update_hgatp()
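kvm_riscv_stage2_update_hgatp() programs the hgatp CSR with the translation mode, the VM's VMID and the PGD PPN. A sketch assuming the HGATP_* field macros from asm/csr.h of that era and the kvm_riscv_stage2_vmid_bits()/__kvm_riscv_hfence_gvma_all() helpers:

void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu)
{
        unsigned long hgatp = stage2_mode;
        struct kvm_arch *k = &vcpu->kvm->arch;

        hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) &
                 HGATP_VMID_MASK;
        hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

        csr_write(CSR_HGATP, hgatp);

        /* Without VMID support, every hgatp switch needs a full G-stage flush. */
        if (!kvm_riscv_stage2_vmid_bits())
                __kvm_riscv_hfence_gvma_all();
}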