/linux/arch/x86/kvm/mmu/

spte.c
      68  return spte;                                           in make_mmio_spte()
     115  spte |= spte_shadow_accessed_mask(spte);               in make_spte()
     123  spte |= shadow_x_mask;                                 in make_spte()
     125  spte |= shadow_nx_mask;                                in make_spte()
     174  spte |= spte_shadow_dirty_mask(spte);                  in make_spte()
     178  spte = mark_spte_for_access_track(spte);               in make_spte()
     190  *new_spte = spte;                                      in make_spte()
     206  return spte;                                           in make_nonleaf_spte()
     249  return spte;                                           in mark_spte_for_access_track()
     264  spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<   in mark_spte_for_access_track()
    [all …]

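The make_spte() and mark_spte_for_access_track() hits above revolve around one technique: a 64-bit SPTE is assembled by OR-ing feature masks, and access tracking works by saving the permission bits into spare bit positions and clearing the architectural ones so the next access faults. A minimal user-space sketch of that save/clear/restore idea follows; the mask values and the saved-bits shift are invented for illustration and do not match KVM's real SPTE layout.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit layout -- not the real KVM/EPT encoding. */
    #define PT_PRESENT_MASK   (1ull << 0)
    #define PT_WRITABLE_MASK  (1ull << 1)
    #define SHADOW_NX_MASK    (1ull << 63)

    /* Pretend bits 54-55 are software-available and can hold the saved bits. */
    #define ACC_TRACK_SAVED_MASK  (PT_PRESENT_MASK | PT_WRITABLE_MASK)
    #define ACC_TRACK_SAVED_SHIFT 54

    static uint64_t mark_for_access_track(uint64_t spte)
    {
        /* Stash the permission bits in the spare range ... */
        spte |= (spte & ACC_TRACK_SAVED_MASK) << ACC_TRACK_SAVED_SHIFT;
        /* ... then clear them so the next access faults and can be noted. */
        spte &= ~ACC_TRACK_SAVED_MASK;
        return spte;
    }

    static uint64_t restore_from_access_track(uint64_t spte)
    {
        /* Copy the saved bits back to their architectural position. */
        spte |= (spte >> ACC_TRACK_SAVED_SHIFT) & ACC_TRACK_SAVED_MASK;
        spte &= ~(ACC_TRACK_SAVED_MASK << ACC_TRACK_SAVED_SHIFT);
        return spte;
    }

    int main(void)
    {
        uint64_t spte = PT_PRESENT_MASK | PT_WRITABLE_MASK | SHADOW_NX_MASK;

        spte = mark_for_access_track(spte);
        printf("tracked:  %#llx\n", (unsigned long long)spte);
        spte = restore_from_access_track(spte);
        printf("restored: %#llx\n", (unsigned long long)spte);
        return 0;
    }
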
spte.h
     186  return spte == REMOVED_SPTE;                                             in is_removed_spte()
     205  static inline bool is_mmio_spte(u64 spte)                                in is_mmio_spte() argument
     223  MMU_WARN_ON(!is_shadow_present_pte(spte));                               in spte_ad_enabled()
     229  MMU_WARN_ON(!is_shadow_present_pte(spte));                               in spte_ad_need_write_protect()
     252  return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;    in is_access_track_spte()
     280  : !is_access_track_spte(spte);                                           in is_accessed_spte()
     283  static inline bool is_dirty_spte(u64 spte)                               in is_dirty_spte() argument
     287  return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;         in is_dirty_spte()
     311  u64 spte, int level)                                                     in is_rsvd_spte() argument
     320  (spte & shadow_mmu_writable_mask);                                       in spte_can_locklessly_be_made_writable()
    [all …]

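Most of the spte.h hits are single-bit predicates; the is_dirty_spte() line is the interesting one, falling back to the writable bit when the MMU mode provides no usable dirty bit. A small user-space sketch of that fallback, with made-up mask values that are not KVM's real ones:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative masks -- not the real KVM layout. */
    #define PT_PRESENT_MASK   (1ull << 0)
    #define PT_WRITABLE_MASK  (1ull << 1)
    #define PT_DIRTY_MASK     (1ull << 6)

    /*
     * When no hardware dirty bit is usable (shadow_dirty_mask == 0),
     * treat "writable" as "dirty", mirroring the is_dirty_spte() hit.
     */
    static uint64_t shadow_dirty_mask;   /* 0 = no hardware dirty bit */

    static bool is_shadow_present(uint64_t spte)
    {
        return spte & PT_PRESENT_MASK;
    }

    static bool is_dirty(uint64_t spte)
    {
        return shadow_dirty_mask ? (spte & shadow_dirty_mask)
                                 : (spte & PT_WRITABLE_MASK);
    }

    int main(void)
    {
        uint64_t spte = PT_PRESENT_MASK | PT_WRITABLE_MASK;

        printf("present=%d dirty(no HW bit)=%d\n",
               is_shadow_present(spte), is_dirty(spte));

        shadow_dirty_mask = PT_DIRTY_MASK;   /* now require the real dirty bit */
        printf("dirty(HW bit, not set)=%d\n", is_dirty(spte));
        return 0;
    }
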
mmu.c
     382  u64 spte;                                              member
     489  return spte.spte;                                      in __get_spte_lockless()
     692  spte = mark_spte_for_access_track(spte);               in mmu_spte_age()
     919  rmap_printk("%p %llx 0->1\n", spte, *spte);            in pte_list_add()
     922  rmap_printk("%p %llx 1->many\n", spte, *spte);         in pte_list_add()
     930  rmap_printk("%p %llx many->many\n", spte, *spte);      in pte_list_add()
    1233  spte = spte & ~PT_WRITABLE_MASK;                       in spte_write_protect()
    2238  u64 spte;                                              in link_shadow_page() local
    2694  u64 spte;                                              in mmu_set_spte() local
    3779  u64 spte;                                              in get_walk() local
    [all …]

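The pte_list_add() hits log the reverse map growing from empty to one entry to many. Below is a toy model of that 0->1->many progression; the inline-pointer-or-descriptor encoding (bit 0 used as a tag) is an assumption made for this sketch, not a claim about KVM's actual pte_list_desc layout.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Toy reverse-map head: empty, a single spte pointer stored inline, or a
     * heap-allocated descriptor once a second entry shows up.  Bit 0 of 'val'
     * tags the "many" case (an assumption for this sketch).
     */
    struct desc {
        uint64_t *sptes[4];
        int count;
    };

    struct rmap_head {
        uintptr_t val;
    };

    static void pte_list_add(struct rmap_head *head, uint64_t *sptep)
    {
        struct desc *d;

        if (!head->val) {
            printf("0->1\n");
            head->val = (uintptr_t)sptep;          /* single entry, stored inline */
        } else if (!(head->val & 1)) {
            printf("1->many\n");
            d = calloc(1, sizeof(*d));
            d->sptes[0] = (uint64_t *)head->val;   /* move the inline entry over */
            d->sptes[1] = sptep;
            d->count = 2;
            head->val = (uintptr_t)d | 1;          /* tag as descriptor */
        } else {
            printf("many->many\n");
            d = (struct desc *)(head->val & ~(uintptr_t)1);
            d->sptes[d->count++] = sptep;
        }
    }

    int main(void)
    {
        struct rmap_head head = { 0 };
        uint64_t a, b, c;

        pte_list_add(&head, &a);
        pte_list_add(&head, &b);
        pte_list_add(&head, &c);

        if (head.val & 1)
            free((void *)(head.val & ~(uintptr_t)1));
        return 0;
    }
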
mmutrace.h
     211  TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
     212  TP_ARGS(sptep, gfn, spte),
     224  __entry->access = spte & ACC_ALL;
     225  __entry->gen = get_mmio_spte_generation(spte);
     312  TP_ARGS(spte, kvm_gen, spte_gen),
     317  __field(u64, spte)
     323  __entry->spte = spte;
     339  __field(u64, spte)
     350  __entry->spte = *sptep;
     354  __entry->x = is_executable_pte(__entry->spte);
    [all …]

paging_tmpl.h
     190  struct kvm_mmu_page *sp, u64 *spte,          in FNAME()
     207  drop_spte(vcpu->kvm, spte);                  in FNAME()
     620  u64 *spte;                                   in FNAME() local
     639  spte = sp->spt + i;                          in FNAME()
     642  if (spte == sptep)                           in FNAME()
     645  if (is_shadow_present_pte(*spte))            in FNAME()
    1094  u64 *sptep, spte;                            in FNAME() local
    1130  spte = *sptep;                               in FNAME()
    1134  spte_to_pfn(spte), spte, true, false,        in FNAME()
    1135  host_writable, &spte);                       in FNAME()
    [all …]

tdp_iter.c
      63  tdp_ptep_t spte_to_child_pt(u64 spte, int level)                  in spte_to_child_pt() argument
      69  if (!is_shadow_present_pte(spte) || is_last_spte(spte, level))    in spte_to_child_pt()
      72  return (tdp_ptep_t)__va(spte_to_pfn(spte) << PAGE_SHIFT);         in spte_to_child_pt()

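spte_to_child_pt() is the TDP iterator's descent step: a non-present or leaf SPTE has no child table, otherwise the pfn field is turned back into a pointer to the next-level table. A user-space model of that step is sketched here, with a small static array standing in for __va() and an invented bit layout:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout: bit 0 = present, bit 7 = leaf/huge, bits 12+ = pfn. */
    #define PTE_PRESENT  (1ull << 0)
    #define PTE_LEAF     (1ull << 7)
    #define PAGE_SHIFT   12

    typedef uint64_t pte_t;

    /* Stand-in for __va(): pretend page frames 0..15 hold page tables. */
    static pte_t fake_pages[16][512];

    static pte_t *spte_to_child_pt(uint64_t spte)
    {
        /* Non-present or leaf SPTEs have no child page table. */
        if (!(spte & PTE_PRESENT) || (spte & PTE_LEAF))
            return NULL;

        /* Otherwise the pfn field addresses the next-level table. */
        return fake_pages[(spte >> PAGE_SHIFT) & 0xf];
    }

    int main(void)
    {
        uint64_t nonleaf = PTE_PRESENT | (3ull << PAGE_SHIFT);
        uint64_t leaf = PTE_PRESENT | PTE_LEAF | (4ull << PAGE_SHIFT);

        printf("non-leaf child: %p\n", (void *)spte_to_child_pt(nonleaf));
        printf("leaf child:     %p\n", (void *)spte_to_child_pt(leaf));
        return 0;
    }
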
mmu_internal.h
     157  void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

tdp_mmu.h
      87  u64 *spte);

tdp_mmu.c
    1515  u64 *spte)                     in kvm_tdp_mmu_fast_pf_get_last_sptep() argument
    1523  *spte = iter.old_spte;         in kvm_tdp_mmu_fast_pf_get_last_sptep()

/linux/Documentation/virt/kvm/

locking.rst
      59  on the spte:
      67  bit if spte.HOST_WRITEABLE = 1 and spte.WRITE_PROTECT = 1, to restore the saved
      84  | spte is the shadow page table entry corresponding with gpte and |
      85  | spte = pfn1 |
     135  writable between reading spte and updating spte. Like below case:
     140  | spte.W = 0 |
     141  | spte.Accessed = 1 |
     178  the spte is always atomically updated in this case.
     180  3) flush tlbs due to spte updated
     184  writable spte might be cached on a CPU's TLB.
    [all …]

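The locking.rst lines describe the lockless fast page fault: the writable bit may be restored only while certain software bits in the spte still say it is safe, and the update must be a compare-and-exchange so any concurrent change to the spte is detected and the slow path taken instead. A sketch of that pattern with C11 atomics follows; the bit names echo the document's wording, but their positions and the exact eligibility test are assumptions of this sketch.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bits -- positions are made up for this sketch. */
    #define SPTE_W               (1ull << 1)
    #define SPTE_HOST_WRITEABLE  (1ull << 57)
    #define SPTE_WRITE_PROTECT   (1ull << 58)

    /*
     * Lockless fast-path style update: only restore the W bit if the spte
     * still says the page may be made writable, and use cmpxchg so a
     * concurrent modification makes the attempt fail.
     */
    static bool fast_make_writable(_Atomic uint64_t *sptep)
    {
        uint64_t old = atomic_load(sptep);

        if (!(old & SPTE_HOST_WRITEABLE) || !(old & SPTE_WRITE_PROTECT))
            return false;               /* not eligible for the fast path */

        uint64_t new_spte = (old | SPTE_W) & ~SPTE_WRITE_PROTECT;

        /* Fails (returns false) if *sptep changed after 'old' was read. */
        return atomic_compare_exchange_strong(sptep, &old, new_spte);
    }

    int main(void)
    {
        _Atomic uint64_t spte = SPTE_HOST_WRITEABLE | SPTE_WRITE_PROTECT;

        printf("fast path took effect: %d\n", fast_make_writable(&spte));
        printf("spte now writable:     %d\n",
               (int)!!(atomic_load(&spte) & SPTE_W));
        return 0;
    }
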
mmu.rst
      55  spte    shadow pte (referring to pfns)
     125  A nonleaf spte allows the hardware mmu to reach the leaf pages and
     331  - walk the shadow page table to find the spte for the translation,
     384  - kernel write fault: spte.u=0, spte.w=1 (allows full kernel access,
     386  - read fault: spte.u=1, spte.w=0 (allows full read access, disallows kernel
     395  If we get a user fetch or read fault, we'll change spte.u=1 and
     421  To instantiate a large spte, four constraints must be satisfied:
     423  - the spte must point to a large host page
     450  When KVM finds an MMIO spte, it checks the generation number of the spte.
     460  stored into the MMIO spte. Thus, the MMIO spte might be created based on
    [all …]

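The mmu.rst lines on MMIO sptes describe caching MMIO information in the spte together with a generation number, so that a memslot change invalidates entries created against stale memslot data. A toy version of that generation check is shown below; the field layout is invented for illustration and is not KVM's real MMIO-spte encoding.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy MMIO-spte layout: an "is MMIO" flag plus a few generation bits. */
    #define MMIO_SPTE_FLAG   (1ull << 62)
    #define MMIO_GEN_SHIFT   3
    #define MMIO_GEN_MASK    0x3ffull

    static uint64_t memslots_generation;   /* bumped whenever memslots change */

    static uint64_t make_mmio_spte(uint64_t gfn)
    {
        return MMIO_SPTE_FLAG |
               ((memslots_generation & MMIO_GEN_MASK) << MMIO_GEN_SHIFT) |
               (gfn << 13);
    }

    static bool mmio_spte_is_stale(uint64_t spte)
    {
        uint64_t gen = (spte >> MMIO_GEN_SHIFT) & MMIO_GEN_MASK;

        /* A mismatch means memslots changed after the spte was cached. */
        return gen != (memslots_generation & MMIO_GEN_MASK);
    }

    int main(void)
    {
        uint64_t spte = make_mmio_spte(0x100);

        printf("stale before memslot update: %d\n", mmio_spte_is_stale(spte));
        memslots_generation++;              /* e.g. a memslot was added */
        printf("stale after memslot update:  %d\n", mmio_spte_is_stale(spte));
        return 0;
    }
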
/linux/arch/s390/mm/

pgtable.c
     666  pte_t spte, tpte;                               in ptep_shadow_pte() local
     672  spte = *sptep;                                  in ptep_shadow_pte()
     673  if (!(pte_val(spte) & _PAGE_INVALID) &&         in ptep_shadow_pte()
     674  !((pte_val(spte) & _PAGE_PROTECT) &&            in ptep_shadow_pte()
     678  pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |   in ptep_shadow_pte()

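The s390 ptep_shadow_pte() hits show a shadow entry being derived from a parent entry: bail out if the parent is invalid or forbids the requested access, otherwise combine the parent's page frame with the protection wanted for the shadow mapping. The sketch below is only a generic model of that idea; the bit names and positions are made up, and the real s390 code involves additional state (pgste, notifiers) that is omitted here.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative s390-style bits (positions invented for this sketch). */
    #define _PAGE_INVALID  (1ull << 10)
    #define _PAGE_PROTECT  (1ull << 9)
    #define PAGE_MASK      (~0xfffull)

    typedef uint64_t pte_t;

    /*
     * Generic model: a shadow entry is only built if the parent entry is
     * valid and permits the requested access; it takes the parent's page
     * frame plus the protection bit wanted for the shadow mapping.
     */
    static bool make_shadow_pte(pte_t parent, bool write, pte_t *out)
    {
        if (parent & _PAGE_INVALID)
            return false;                       /* nothing to shadow */
        if ((parent & _PAGE_PROTECT) && write)
            return false;                       /* parent forbids writing */

        *out = (parent & PAGE_MASK) | (write ? 0 : _PAGE_PROTECT);
        return true;
    }

    int main(void)
    {
        pte_t parent = 0x123000, shadow;

        if (make_shadow_pte(parent, true, &shadow))
            printf("shadow pte: %#llx\n", (unsigned long long)shadow);
        return 0;
    }
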
/linux/arch/x86/kvm/

Makefile
      20  mmu/spte.o

/linux/mm/

hugetlb.c
    6493  pte_t *spte = NULL;                                  in huge_pmd_share() local
    6504  spte = huge_pte_offset(svma->vm_mm, saddr,           in huge_pmd_share()
    6506  if (spte) {                                          in huge_pmd_share()
    6507  get_page(virt_to_page(spte));                        in huge_pmd_share()
    6513  if (!spte)                                           in huge_pmd_share()
    6516  ptl = huge_pte_lock(hstate_vma(vma), mm, spte);      in huge_pmd_share()
    6519  (pmd_t *)((unsigned long)spte & PAGE_MASK));         in huge_pmd_share()
    6522  put_page(virt_to_page(spte));                        in huge_pmd_share()

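The huge_pmd_share() hits illustrate a classic refcounted sharing pattern: find an already-populated page-table page in a sibling mapping, take a reference on it, then install it under the page-table lock, dropping the extra reference if another thread already installed one. A single-threaded generic model of that pattern is below; struct pt_page and share_table() are inventions for the sketch, not mm/hugetlb.c APIs.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    /* A toy refcounted page-table page, standing in for struct page. */
    struct pt_page {
        atomic_int refcount;
    };

    static struct pt_page *slot;   /* where the shared table gets installed */

    /*
     * huge_pmd_share()-style pattern: take a reference on a table found
     * elsewhere, install it under a lock, and drop the reference again if
     * someone else won the race to populate the slot.
     */
    static struct pt_page *share_table(struct pt_page *found)
    {
        if (found)
            atomic_fetch_add(&found->refcount, 1);   /* get_page() */

        /* pretend the page-table lock is held from here on */
        if (slot == NULL && found)
            slot = found;                            /* won the race: install it */
        else if (found)
            atomic_fetch_sub(&found->refcount, 1);   /* lost the race: put_page() */

        return slot;
    }

    int main(void)
    {
        struct pt_page candidate;            /* "found in a sibling mapping" */
        atomic_init(&candidate.refcount, 2);

        share_table(&candidate);
        share_table(&candidate);             /* second caller loses the "race" */
        printf("refcount now: %d\n", atomic_load(&candidate.refcount));
        return 0;
    }
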
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

vmm.c
     448  bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;     in nvkm_vmm_ref_hwpt() local
     451  if (spte != next)                                    in nvkm_vmm_ref_hwpt()
     455  if (!spte) {                                         in nvkm_vmm_ref_hwpt()