Lines Matching refs:kvm

232 	struct kvm *kvm;  member
247 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_init() argument
262 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
263 list_add(&p->list, &kvm->arch.uvmem_pfns); in kvmppc_uvmem_slot_init()
264 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
272 void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_free() argument
276 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
277 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { in kvmppc_uvmem_slot_free()
285 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
288 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
293 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_mark_gfn()
308 unsigned long uvmem_pfn, struct kvm *kvm) in kvmppc_gfn_secure_uvmem_pfn() argument
310 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn()
314 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument
316 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
320 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument
322 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); in kvmppc_gfn_shared()
326 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_remove() argument
328 kvmppc_mark_gfn(gfn, kvm, 0, 0); in kvmppc_gfn_remove()
332 static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, in kvmppc_gfn_is_uvmem_pfn() argument
337 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_gfn_is_uvmem_pfn()
361 struct kvm *kvm, unsigned long *gfn) in kvmppc_next_nontransitioned_gfn() argument
367 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) in kvmppc_next_nontransitioned_gfn()
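
The helpers above (kvmppc_mark_gfn(), kvmppc_gfn_secure_uvmem_pfn(), kvmppc_gfn_shared(), kvmppc_gfn_is_uvmem_pfn(), kvmppc_next_nontransitioned_gfn()) all walk kvm->arch.uvmem_pfns to record and look up the migration state of each guest frame. Below is a minimal, self-contained userspace sketch of that bookkeeping, keeping one tagged word per GFN that can also carry a device PFN; the flag names, bit positions and struct layout are assumptions for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed flag encoding (64-bit longs): high bits tag the state, low bits hold a PFN. */
#define GFN_UVMEM_PFN	(1UL << 63)	/* secure, backed by a device (uvmem) PFN */
#define GFN_MEM_PFN	(1UL << 62)	/* secure, backed by normal memory */
#define GFN_SHARED	(1UL << 61)	/* shared with the hypervisor */
#define GFN_FLAG_MASK	(GFN_UVMEM_PFN | GFN_MEM_PFN | GFN_SHARED)

struct uvmem_slot {
	unsigned long base_gfn;
	unsigned long nr_gfns;
	unsigned long *state;		/* one tagged word per GFN in the memslot */
};

/* Stand-in for kvmppc_mark_gfn(): record a state flag (and optional PFN) for a GFN. */
static void mark_gfn(struct uvmem_slot *s, unsigned long gfn,
		     unsigned long flag, unsigned long pfn)
{
	s->state[gfn - s->base_gfn] = flag | (pfn & ~GFN_FLAG_MASK);
}

/* Stand-in for kvmppc_gfn_is_uvmem_pfn(): is this GFN device-backed, and where? */
static bool gfn_is_uvmem_pfn(struct uvmem_slot *s, unsigned long gfn,
			     unsigned long *pfn)
{
	unsigned long v = s->state[gfn - s->base_gfn];

	if (!(v & GFN_UVMEM_PFN))
		return false;
	if (pfn)
		*pfn = v & ~GFN_FLAG_MASK;
	return true;
}

int main(void)
{
	struct uvmem_slot s = { .base_gfn = 0x100, .nr_gfns = 16 };
	unsigned long pfn;

	s.state = calloc(s.nr_gfns, sizeof(*s.state));
	if (!s.state)
		return 1;
	mark_gfn(&s, 0x104, GFN_UVMEM_PFN, 42);	/* page migrated to device memory */
	if (gfn_is_uvmem_pfn(&s, 0x104, &pfn))
		printf("gfn 0x104 -> uvmem pfn %lu\n", pfn);
	free(s.state);
	return 0;
}
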
388 static int kvmppc_memslot_page_merge(struct kvm *kvm, in kvmppc_memslot_page_merge() argument
392 unsigned long end, start = gfn_to_hva(kvm, gfn); in kvmppc_memslot_page_merge()
402 mmap_write_lock(kvm->mm); in kvmppc_memslot_page_merge()
404 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_memslot_page_merge()
418 mmap_write_unlock(kvm->mm); in kvmppc_memslot_page_merge()
422 static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm, in __kvmppc_uvmem_memslot_delete() argument
425 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
426 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_delete()
427 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_delete()
430 static int __kvmppc_uvmem_memslot_create(struct kvm *kvm, in __kvmppc_uvmem_memslot_create() argument
435 if (kvmppc_memslot_page_merge(kvm, memslot, false)) in __kvmppc_uvmem_memslot_create()
438 if (kvmppc_uvmem_slot_init(kvm, memslot)) in __kvmppc_uvmem_memslot_create()
441 ret = uv_register_mem_slot(kvm->arch.lpid, in __kvmppc_uvmem_memslot_create()
451 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_create()
453 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_create()
457 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) in kvmppc_h_svm_init_start() argument
464 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; in kvmppc_h_svm_init_start()
470 if (!kvm_is_radix(kvm)) in kvmppc_h_svm_init_start()
474 if (!kvm->arch.svm_enabled) in kvmppc_h_svm_init_start()
477 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_start()
480 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_start()
482 ret = __kvmppc_uvmem_memslot_create(kvm, memslot); in kvmppc_h_svm_init_start()
488 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_start()
492 __kvmppc_uvmem_memslot_delete(kvm, memslot); in kvmppc_h_svm_init_start()
496 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_start()
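
kvmppc_h_svm_init_start() above flags the guest as transitioning to secure mode and then registers every memslot with the ultravisor, unwinding the slots that were already set up if any registration fails. A minimal sketch of that all-or-rollback loop, with create_slot()/delete_slot() as hypothetical placeholders for __kvmppc_uvmem_memslot_create()/__kvmppc_uvmem_memslot_delete():

#include <stdio.h>

struct memslot { int id; };

/* Placeholder for __kvmppc_uvmem_memslot_create(); made to fail on slot 2. */
static int create_slot(struct memslot *m)
{
	return m->id == 2 ? -1 : 0;
}

/* Placeholder for __kvmppc_uvmem_memslot_delete(). */
static void delete_slot(struct memslot *m)
{
	printf("rolling back slot %d\n", m->id);
}

static int init_start(struct memslot *slots, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = create_slot(&slots[i]);
		if (ret)
			break;
	}
	if (ret) {
		/* Undo only the slots created before the failure. */
		while (--i >= 0)
			delete_slot(&slots[i]);
	}
	return ret;
}

int main(void)
{
	struct memslot slots[] = { {0}, {1}, {2}, {3} };

	return init_start(slots, 4) ? 1 : 0;
}
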
508 struct kvm *kvm, unsigned long gpa) in __kvmppc_svm_page_out() argument
527 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) in __kvmppc_svm_page_out()
559 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, in __kvmppc_svm_page_out()
580 struct kvm *kvm, unsigned long gpa) in kvmppc_svm_page_out() argument
584 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
585 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa); in kvmppc_svm_page_out()
586 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
600 struct kvm *kvm, bool skip_page_out) in kvmppc_uvmem_drop_pages() argument
609 mmap_read_lock(kvm->mm); in kvmppc_uvmem_drop_pages()
618 vma = vma_lookup(kvm->mm, addr); in kvmppc_uvmem_drop_pages()
625 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
627 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_uvmem_drop_pages()
634 PAGE_SHIFT, kvm, pvt->gpa)) in kvmppc_uvmem_drop_pages()
639 kvmppc_gfn_remove(gfn, kvm); in kvmppc_uvmem_drop_pages()
642 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
645 mmap_read_unlock(kvm->mm); in kvmppc_uvmem_drop_pages()
648 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) in kvmppc_h_svm_init_abort() argument
657 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_abort()
660 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_h_svm_init_abort()
663 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_abort()
665 kvm_for_each_memslot(memslot, kvm_memslots(kvm)) in kvmppc_h_svm_init_abort()
666 kvmppc_uvmem_drop_pages(memslot, kvm, false); in kvmppc_h_svm_init_abort()
668 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_abort()
670 kvm->arch.secure_guest = 0; in kvmppc_h_svm_init_abort()
671 uv_svm_terminate(kvm->arch.lpid); in kvmppc_h_svm_init_abort()
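
kvmppc_h_svm_init_abort() above undoes an in-progress transition: kvmppc_uvmem_drop_pages() pages every device-backed GFN back out to normal memory and drops its bookkeeping entry, after which the secure state is cleared and the ultravisor is asked to terminate the SVM. A rough standalone model of that drop loop, with stand-in states and helpers:

#include <stdbool.h>
#include <stdio.h>

enum gfn_state { ST_NORMAL, ST_DEVICE, ST_SHARED };

/* Stand-in for __kvmppc_svm_page_out(): copy the page back to normal memory. */
static void page_out(int gfn)
{
	printf("paging gfn %d back out\n", gfn);
}

/* Stand-in for kvmppc_uvmem_drop_pages(). */
static void drop_pages(enum gfn_state *s, int n, bool skip_page_out)
{
	for (int gfn = 0; gfn < n; gfn++) {
		if (s[gfn] != ST_DEVICE)
			continue;
		if (!skip_page_out)
			page_out(gfn);
		s[gfn] = ST_NORMAL;	/* stand-in for kvmppc_gfn_remove() */
	}
}

int main(void)
{
	enum gfn_state slot[4] = { ST_DEVICE, ST_NORMAL, ST_DEVICE, ST_SHARED };
	unsigned int secure_guest = 0x1;	/* INIT_START had been set */

	drop_pages(slot, 4, false);	/* abort path: pages must be copied back */
	secure_guest = 0;		/* then uv_svm_terminate() would be called */
	printf("secure_guest = %#x\n", secure_guest);
	return 0;
}
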
684 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) in kvmppc_uvmem_get_page() argument
708 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); in kvmppc_uvmem_get_page()
711 pvt->kvm = kvm; in kvmppc_uvmem_get_page()
732 unsigned long end, unsigned long gpa, struct kvm *kvm, in kvmppc_svm_page_in() argument
760 dpage = kvmppc_uvmem_get_page(gpa, kvm); in kvmppc_svm_page_in()
770 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, in kvmppc_svm_page_in()
784 static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm, in kvmppc_uv_migrate_mem_slot() argument
792 mmap_read_lock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
793 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
794 while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) { in kvmppc_uv_migrate_mem_slot()
796 start = gfn_to_hva(kvm, gfn); in kvmppc_uv_migrate_mem_slot()
801 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_uv_migrate_mem_slot()
806 (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false); in kvmppc_uv_migrate_mem_slot()
815 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
816 mmap_read_unlock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
820 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) in kvmppc_h_svm_init_done() argument
827 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_done()
831 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_done()
832 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_done()
834 ret = kvmppc_uv_migrate_mem_slot(kvm, memslot); in kvmppc_h_svm_init_done()
850 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; in kvmppc_h_svm_init_done()
851 pr_info("LPID %d went secure\n", kvm->arch.lpid); in kvmppc_h_svm_init_done()
854 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_done()
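
kvmppc_uv_migrate_mem_slot() and kvmppc_h_svm_init_done() above finish the transition: every GFN still in the normal state is moved into secure memory, and only when all memslots migrate cleanly is the INIT_DONE flag set. The standalone sketch below mirrors that loop; state names and flag values are illustrative stand-ins.

#include <stdbool.h>
#include <stdio.h>

enum gfn_state { ST_NORMAL, ST_SECURE, ST_SHARED };

#define INIT_START	0x1
#define INIT_DONE	0x2

/* Stand-in for kvmppc_next_nontransitioned_gfn(): advance *gfn to the next normal page. */
static bool next_nontransitioned(const enum gfn_state *s, int n, int *gfn)
{
	for (; *gfn < n; (*gfn)++)
		if (s[*gfn] == ST_NORMAL)
			return true;
	return false;
}

/* Stand-in for kvmppc_uv_migrate_mem_slot(). */
static int migrate_slot(enum gfn_state *s, int n)
{
	int gfn = 0;

	while (next_nontransitioned(s, n, &gfn))
		s[gfn] = ST_SECURE;	/* stand-in for kvmppc_svm_page_in() */
	return 0;
}

int main(void)
{
	enum gfn_state slot[8] = { [3] = ST_SHARED };	/* one page was already shared */
	unsigned int secure_guest = INIT_START;

	if (!migrate_slot(slot, 8))
		secure_guest |= INIT_DONE;
	printf("secure_guest flags: %#x\n", secure_guest);
	return 0;
}
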
867 static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa, in kvmppc_share_page() argument
879 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_share_page()
880 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
881 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
893 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
894 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_share_page()
898 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
899 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
908 if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, in kvmppc_share_page()
910 kvmppc_gfn_shared(gfn, kvm); in kvmppc_share_page()
914 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
916 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_share_page()
926 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_in() argument
936 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_in()
946 return kvmppc_share_page(kvm, gpa, page_shift); in kvmppc_h_svm_page_in()
949 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_in()
950 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_in()
952 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_in()
956 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
958 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_h_svm_page_in()
962 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_in()
966 if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, in kvmppc_h_svm_page_in()
973 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
975 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_in()
976 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_in()
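
kvmppc_h_svm_page_in() above dispatches on the request flags: a shared page-in is handled by kvmppc_share_page(), anything else is migrated into secure memory, and a GFN that is already device-backed is rejected. A small sketch of that dispatch; the flag value, page shift and helper names are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_IN_SHARED	0x1	/* stand-in for the hypercall's "shared" flag */
#define PAGE_SHIFT	12	/* assumed page size for the sketch */

/* Stand-in for kvmppc_gfn_is_uvmem_pfn(). */
static bool gfn_is_device_backed(unsigned long gfn)
{
	(void)gfn;
	return false;
}

/* Stand-in for kvmppc_share_page(): leave the page accessible to the hypervisor. */
static long share_page(unsigned long gpa)
{
	printf("sharing gpa %#lx with the hypervisor\n", gpa);
	return 0;
}

/* Stand-in for kvmppc_svm_page_in(): migrate the page into secure memory. */
static long secure_page_in(unsigned long gpa)
{
	printf("migrating gpa %#lx into secure memory\n", gpa);
	return 0;
}

static long h_svm_page_in(unsigned long gpa, unsigned long flags)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;

	if (flags & PAGE_IN_SHARED)
		return share_page(gpa);
	if (gfn_is_device_backed(gfn))
		return -1;	/* already paged in: the request is rejected */
	return secure_page_in(gpa);
}

int main(void)
{
	h_svm_page_in(0x4000, 0);
	h_svm_page_in(0x5000, PAGE_IN_SHARED);
	return 0;
}
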
995 pvt->kvm, pvt->gpa)) in kvmppc_uvmem_migrate_to_ram()
1021 kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1023 kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1036 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_out() argument
1045 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_out()
1055 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_out()
1056 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_out()
1057 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_out()
1062 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_out()
1066 if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa)) in kvmppc_h_svm_page_out()
1069 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_out()
1070 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_out()
1074 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) in kvmppc_send_page_to_uv() argument
1079 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_send_page_to_uv()
1083 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1084 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_send_page_to_uv()
1087 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, in kvmppc_send_page_to_uv()
1091 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
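
kvmppc_send_page_to_uv() above resolves the host PFN for a faulting GFN, skips GFNs that have already been migrated to device memory, and otherwise hands the page to the ultravisor with uv_page_in(). A minimal sketch of that path, with hypothetical stand-ins for the KVM and ultravisor calls:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed page size for the sketch */

/* Stand-in for gfn_to_pfn(): resolve the backing host page frame. */
static long host_pfn_for_gfn(unsigned long gfn)
{
	return 0x8000 + (long)gfn;
}

/* Stand-in for kvmppc_gfn_is_uvmem_pfn(). */
static bool gfn_is_device_backed(unsigned long gfn)
{
	(void)gfn;
	return false;
}

/* Stand-in for uv_page_in(): give the page to the ultravisor. */
static int uv_page_in_stub(unsigned long pa, unsigned long gpa)
{
	printf("uv_page_in: pa %#lx -> gpa %#lx\n", pa, gpa);
	return 0;
}

static int send_page_to_uv(unsigned long gfn)
{
	long pfn = host_pfn_for_gfn(gfn);

	if (pfn < 0)
		return -1;		/* no backing page for this GFN */
	if (gfn_is_device_backed(gfn))
		return 0;		/* already secure: nothing to send */
	return uv_page_in_stub((unsigned long)pfn << PAGE_SHIFT,
			       gfn << PAGE_SHIFT);
}

int main(void)
{
	return send_page_to_uv(0x42);
}
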
1095 int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new) in kvmppc_uvmem_memslot_create() argument
1097 int ret = __kvmppc_uvmem_memslot_create(kvm, new); in kvmppc_uvmem_memslot_create()
1100 ret = kvmppc_uv_migrate_mem_slot(kvm, new); in kvmppc_uvmem_memslot_create()
1105 void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old) in kvmppc_uvmem_memslot_delete() argument
1107 __kvmppc_uvmem_memslot_delete(kvm, old); in kvmppc_uvmem_memslot_delete()