Lines matching refs:tbl — cross-reference listing for the identifier tbl in the KVM sPAPR TCE management code (arch/powerpc/kvm, Linux kernel). Each match shows the source line number, the matching code, and the enclosing function; declarations are tagged "local" or "argument".
53 iommu_tce_table_put(stit->tbl); in kvm_spapr_tce_iommu_table_free()
85 if (table_group->tables[i] != stit->tbl) in kvm_spapr_tce_release_iommu_group()
101 struct iommu_table *tbl = NULL; in kvm_spapr_tce_attach_iommu_group() local
144 tbl = iommu_tce_table_get(tbltmp); in kvm_spapr_tce_attach_iommu_group()
148 if (!tbl) in kvm_spapr_tce_attach_iommu_group()
153 if (tbl != stit->tbl) in kvm_spapr_tce_attach_iommu_group()
158 iommu_tce_table_put(tbl); in kvm_spapr_tce_attach_iommu_group()
173 iommu_tce_table_put(tbl); in kvm_spapr_tce_attach_iommu_group()
177 stit->tbl = tbl; in kvm_spapr_tce_attach_iommu_group()
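The attach-path matches above (lines 101-177) trace the table reference counting: a compatible hardware table is picked from the group, pinned with iommu_tce_table_get(), rejected as a duplicate against already-attached stit->tbl entries (dropping the just-taken reference), and finally published via stit->tbl = tbl. A minimal sketch of that shape, assuming the kernel context of this file; the compatibility check, allocation, and error codes are assumptions:

static long attach_pick_table_sketch(struct kvmppc_spapr_tce_table *stt,
		struct iommu_table_group *table_group)
{
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct iommu_table *tbl = NULL;
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		/* Assumed compatibility check: the hardware page size
		 * must not exceed the guest TCE table's page size. */
		if (tbltmp && tbltmp->it_page_shift <= stt->page_shift) {
			/* Pin the table against concurrent removal. */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;
		/* Already attached: drop the reference taken above. */
		iommu_tce_table_put(tbl);
		return -EBUSY;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		/* Error path still owns a reference to drop. */
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	list_add_rcu(&stit->next, &stt->iommu_tables);
	return 0;
}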
380 long shift = stit->tbl->it_page_shift; in kvmppc_tce_validate()
402 u64 *tbl; in kvmppc_tce_put() local
418 tbl = page_to_virt(page); in kvmppc_tce_put()
420 tbl[idx % TCES_PER_PAGE] = tce; in kvmppc_tce_put()
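kvmppc_tce_put() (lines 402-420) updates the guest-visible shadow table: the backing page for entry idx is resolved, mapped with page_to_virt(), and the TCE is written at idx % TCES_PER_PAGE. A short sketch of the indexing, assuming stt->pages[] holds the backing pages (each page stores TCES_PER_PAGE u64 entries):

static void tce_put_sketch(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	/* Assumed layout: stt->pages[] indexed by idx / TCES_PER_PAGE. */
	struct page *page = stt->pages[idx / TCES_PER_PAGE];
	u64 *tbl;

	if (WARN_ON_ONCE(!page))
		return;

	tbl = page_to_virt(page);         /* kernel mapping of the page */
	tbl[idx % TCES_PER_PAGE] = tce;   /* one u64 TCE per entry */
}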
423 static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl, in kvmppc_clear_tce() argument
429 iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir); in kvmppc_clear_tce()
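kvmppc_clear_tce() (lines 423-429) is the forced variant used on the error paths further down: it exchanges the entry via iommu_tce_xchg_no_kill(), presumably with hpa = 0 and DMA_NONE so the slot ends up empty, and ignores the result.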
433 struct iommu_table *tbl, unsigned long entry) in kvmppc_tce_iommu_mapped_dec() argument
436 const unsigned long pgsize = 1ULL << tbl->it_page_shift; in kvmppc_tce_iommu_mapped_dec()
437 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); in kvmppc_tce_iommu_mapped_dec()
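kvmppc_tce_iommu_mapped_dec() (lines 433-437) unwinds the per-entry userspace tracking: the read-only userspace view of the entry comes from IOMMU_TABLE_USERSPACE_ENTRY_RO(), the preregistered memory region covering that address is looked up at the table's page size, and its mapped count is decremented. A hedged reconstruction; everything past the three listed lines is an assumption:

static long tce_iommu_mapped_dec_sketch(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;        /* nothing tracked for this entry */

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;       /* assumed error code */

	mm_iommu_mapped_dec(mem);        /* drop the pinned-region count */
	*pua = cpu_to_be64(0);           /* entry no longer tracked */

	return H_SUCCESS;
}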
454 struct iommu_table *tbl, unsigned long entry) in kvmppc_tce_iommu_do_unmap() argument
460 if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, in kvmppc_tce_iommu_do_unmap()
467 ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); in kvmppc_tce_iommu_do_unmap()
469 iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir); in kvmppc_tce_iommu_do_unmap()
475 struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, in kvmppc_tce_iommu_unmap() argument
479 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); in kvmppc_tce_iommu_unmap()
483 ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i); in kvmppc_tce_iommu_unmap()
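Lines 454-483 cover the two-level unmap: kvmppc_tce_iommu_do_unmap() exchanges one hardware entry with iommu_tce_xchg_no_kill(), drops the mapped count, and restores the entry if that fails, while kvmppc_tce_iommu_unmap() fans one guest-visible entry out over 1ULL << (stt->page_shift - tbl->it_page_shift) hardware subpages. A sketch combining both; the rollback details are assumptions:

static long do_unmap_sketch(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	/* Swap the entry out; "no_kill" defers the TCE cache flush. */
	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry,
						&hpa, &dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;        /* entry was already clear */

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		/* Roll the hardware entry back on tracking failure. */
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long unmap_sketch(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	/* One guest page may span several hardware IOMMU pages. */
	unsigned long i, subpages =
		1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;
	long ret = H_SUCCESS;

	for (i = 0; i < subpages; ++i) {
		ret = do_unmap_sketch(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}
	return ret;
}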
491 static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, in kvmppc_tce_iommu_do_map() argument
497 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); in kvmppc_tce_iommu_do_map()
504 mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift); in kvmppc_tce_iommu_do_map()
509 if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) in kvmppc_tce_iommu_do_map()
515 ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir); in kvmppc_tce_iommu_do_map()
522 kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); in kvmppc_tce_iommu_do_map()
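The map side (lines 491-522) mirrors the unmap: the writable userspace tracking entry comes from IOMMU_TABLE_USERSPACE_ENTRY(), the preregistered region for the guest userspace address ua is found with mm_iommu_lookup(), translated to a host physical address at the table's page shift with mm_iommu_ua_to_hpa(), and exchanged into the hardware table; line 522 shows the mapped count being dropped again when the exchange displaced an earlier mapping. A hedged sketch; the mm_iommu_mapped_inc() step and the exact ordering are assumptions:

static long do_map_sketch(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long hpa;
	long ret;

	if (!pua)
		return H_TOO_HARD;   /* userspace view not allocated yet */

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;   /* memory was not preregistered */

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift,
					    &hpa)))
		return H_TOO_HARD;

	/* Assumed: pin the region before it goes live in the table. */
	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	/* The exchange may have displaced an older mapping. */
	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);
	return 0;
}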
530 struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, in kvmppc_tce_iommu_map() argument
535 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); in kvmppc_tce_iommu_map()
539 ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { in kvmppc_tce_iommu_map()
541 ret = kvmppc_tce_iommu_do_map(kvm, tbl, in kvmppc_tce_iommu_map()
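kvmppc_tce_iommu_map() (lines 530-541) fans one guest entry out over the hardware subpages, advancing the userspace address by IOMMU_PAGE_SIZE(tbl) per step. A sketch of the loop, reusing the hypothetical do_map_sketch() from the previous sketch:

static long map_sketch(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff;
	unsigned long subpages =
		1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;
	long ret = H_SUCCESS;

	/* Walk the guest page in hardware-page-sized steps. */
	for (i = 0, pgoff = 0; i < subpages;
	     ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
		ret = do_map_sketch(kvm, tbl, io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}
	return ret;
}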
588 stit->tbl, entry); in kvmppc_h_put_tce()
590 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, in kvmppc_h_put_tce()
593 iommu_tce_kill(stit->tbl, entry, 1); in kvmppc_h_put_tce()
596 kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); in kvmppc_h_put_tce()
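The H_PUT_TCE matches (lines 588-596) show the per-table dispatch: every attached hardware table gets an unmap or a map depending on the requested DMA direction, the deferred TCE cache flush is issued once per table with iommu_tce_kill(), and a failed entry is force-cleared with kvmppc_clear_tce(). A sketch of that loop, assuming dir was already decoded from the TCE's permission bits:

static long h_put_tce_tables_sketch(struct kvm_vcpu *vcpu,
		struct kvmppc_spapr_tce_table *stt, unsigned long entry,
		unsigned long ua, enum dma_data_direction dir)
{
	struct kvmppc_spapr_tce_iommu_table *stit;
	long ret = H_SUCCESS;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
						     stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
						   stit->tbl, entry, ua, dir);

		/* Flush once, after the no-kill exchange(s) above. */
		iommu_tce_kill(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			/* Undo a half-updated entry before failing. */
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			break;
		}
	}
	return ret;
}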
683 stit->tbl, entry + i, ua, in kvmppc_h_put_tce_indirect()
687 kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, in kvmppc_h_put_tce_indirect()
698 iommu_tce_kill(stit->tbl, entry, npages); in kvmppc_h_put_tce_indirect()
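The indirect variant (lines 683-698) applies the same per-table pattern to each element of the guest's TCE list: kvmppc_tce_iommu_map() per entry + i, kvmppc_clear_tce() on failure, and a single iommu_tce_kill(stit->tbl, entry, npages) after the loop so the whole run of npages updates is flushed at once, the same batching sketched below for H_STUFF_TCE.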
732 stit->tbl, entry + i); in kvmppc_h_stuff_tce()
741 kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); in kvmppc_h_stuff_tce()
750 iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages); in kvmppc_h_stuff_tce()
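Finally, H_STUFF_TCE (lines 732-750) clears npages entries per attached table and batches the flush: one iommu_tce_kill() covers the whole range, with ioba >> stt->page_shift recovering the first TCE index from the I/O bus address. A sketch of the batching; the H_TOO_HARD early-out and the force-cleared index are assumptions:

static long h_stuff_tce_tables_sketch(struct kvm_vcpu *vcpu,
		struct kvmppc_spapr_tce_table *stt, unsigned long ioba,
		unsigned long npages)
{
	struct kvmppc_spapr_tce_iommu_table *stit;
	/* The I/O bus address maps to a TCE index via the page shift. */
	unsigned long i, entry = ioba >> stt->page_shift;
	long ret = H_SUCCESS;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
						     stit->tbl, entry + i);
			if (ret == H_SUCCESS)
				continue;
			if (ret == H_TOO_HARD)
				return ret;
			/* Other failures: force-clear and carry on. */
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
					 entry + i);
		}
		/* One deferred flush for the whole cleared range. */
		iommu_tce_kill(stit->tbl, entry, npages);
	}
	return ret;
}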