Lines Matching refs:domain
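
(All matches appear to be fragments of the Renesas IPMMU-VMSA driver, drivers/iommu/ipmmu-vmsa.c in the Linux kernel. The leading number on each line is the line number within that file, and the trailing annotation names the enclosing function and whether `domain` is an argument or a local there. The sketches between the match groups below reconstruct the surrounding code from these fragments for context; exact constants, comments and signatures may differ across kernel versions.)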

212 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,  in ipmmu_ctx_read_root()  argument
215 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg); in ipmmu_ctx_read_root()
218 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_write_root() argument
221 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); in ipmmu_ctx_write_root()
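
These two helpers route per-context register access through the root IPMMU: on parts where a cache IPMMU sits in front of a root IPMMU, the context registers only exist on the root. A minimal sketch, assuming the driver's ipmmu_ctx_read()/ipmmu_ctx_write() accessors:

	static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
				       unsigned int reg)
	{
		/* Context registers live on the root IPMMU only. */
		return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
	}

	static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
					 unsigned int reg, u32 data)
	{
		ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
	}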
224 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_write_all() argument
227 if (domain->mmu != domain->mmu->root) in ipmmu_ctx_write_all()
228 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data); in ipmmu_ctx_write_all()
230 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); in ipmmu_ctx_write_all()
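
ipmmu_ctx_write_all() extends the root write with a mirrored write to the cache IPMMU when the domain is attached to one, i.e. when domain->mmu is not itself the root. Sketch:

	static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
					unsigned int reg, u32 data)
	{
		/* Mirror the write to the cache IPMMU, if there is one. */
		if (domain->mmu != domain->mmu->root)
			ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

		/* The root IPMMU always gets the write. */
		ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
	}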
255 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_sync() argument
259 while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) { in ipmmu_tlb_sync()
262 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
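
The sync helper polls IMCTR until the hardware clears the FLUSH bit, bailing out with a rate-limited error after a bounded number of iterations. A sketch, assuming a driver-local timeout constant (named TLB_LOOP_TIMEOUT here):

	static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
	{
		unsigned int count = 0;

		/* Wait for the hardware to clear IMCTR_FLUSH. */
		while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
			cpu_relax();
			if (++count == TLB_LOOP_TIMEOUT) {
				dev_err_ratelimited(domain->mmu->dev,
					"TLB sync timed out -- MMU may be deadlocked\n");
				return;
			}
			udelay(1);
		}
	}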
270 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_invalidate() argument
274 reg = ipmmu_ctx_read_root(domain, IMCTR); in ipmmu_tlb_invalidate()
276 ipmmu_ctx_write_all(domain, IMCTR, reg); in ipmmu_tlb_invalidate()
278 ipmmu_tlb_sync(domain); in ipmmu_tlb_invalidate()
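
Invalidation is a read-modify-write of IMCTR that sets the FLUSH bit, pushed to every IPMMU instance involved, followed by a synchronous wait:

	static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
	{
		u32 reg;

		reg = ipmmu_ctx_read_root(domain, IMCTR);
		reg |= IMCTR_FLUSH;
		ipmmu_ctx_write_all(domain, IMCTR, reg);

		ipmmu_tlb_sync(domain);
	}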
284 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, in ipmmu_utlb_enable() argument
287 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable()
297 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) | in ipmmu_utlb_enable()
299 mmu->utlb_ctx[utlb] = domain->context_id; in ipmmu_utlb_enable()
305 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, in ipmmu_utlb_disable() argument
308 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_disable()
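
Enabling a micro-TLB points it at the domain's context via the TTSEL field of IMUCTR, flushes it, and turns it on; the utlb_ctx[] array remembers the binding. Disabling clears IMUCTR and the cached binding. A sketch of both paths, assuming the driver's IMUCTR_*/IPMMU_CTX_INVALID definitions:

	static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
				      unsigned int utlb)
	{
		struct ipmmu_vmsa_device *mmu = domain->mmu;

		ipmmu_imuasid_write(mmu, utlb, 0);
		/* Select the context, flush the micro-TLB, enable translation. */
		ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
					      IMUCTR_FLUSH | IMUCTR_MMUEN);
		mmu->utlb_ctx[utlb] = domain->context_id;
	}

	static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
				       unsigned int utlb)
	{
		struct ipmmu_vmsa_device *mmu = domain->mmu;

		ipmmu_imuctr_write(mmu, utlb, 0);
		mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
	}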
316 struct ipmmu_vmsa_domain *domain = cookie; in ipmmu_tlb_flush_all() local
318 ipmmu_tlb_invalidate(domain); in ipmmu_tlb_flush_all()
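
This is the io-pgtable flush callback: the opaque cookie passed to alloc_io_pgtable_ops() is the domain itself, so the callback recovers it and invalidates everything. A sketch of how it is wired into the ipmmu_flush_ops table referenced from domain->cfg.tlb below (the exact set of iommu_flush_ops callbacks varies across kernel versions):

	static void ipmmu_tlb_flush_all(void *cookie)
	{
		struct ipmmu_vmsa_domain *domain = cookie;

		ipmmu_tlb_invalidate(domain);
	}

	static void ipmmu_tlb_flush(unsigned long iova, size_t size,
				    size_t granule, void *cookie)
	{
		/* No range-based invalidation; flush everything. */
		ipmmu_tlb_flush_all(cookie);
	}

	static const struct iommu_flush_ops ipmmu_flush_ops = {
		.tlb_flush_all	= ipmmu_tlb_flush_all,
		.tlb_flush_walk	= ipmmu_tlb_flush,
	};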
337 struct ipmmu_vmsa_domain *domain) in ipmmu_domain_allocate_context() argument
346 mmu->domains[ret] = domain; in ipmmu_domain_allocate_context()
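
Context allocation scans the device's context bitmap for a free slot under the device spinlock, records the domain in mmu->domains[] so the IRQ path can map a context index back to its domain, and returns the index (or -EBUSY). Sketch, assuming the mmu->lock/ctx/num_ctx fields from the driver:

	static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
						 struct ipmmu_vmsa_domain *domain)
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&mmu->lock, flags);

		ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
		if (ret != mmu->num_ctx) {
			mmu->domains[ret] = domain;
			set_bit(ret, mmu->ctx);
		} else {
			ret = -EBUSY;
		}

		spin_unlock_irqrestore(&mmu->lock, flags);

		return ret;
	}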
369 static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_setup_context() argument
375 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr; in ipmmu_domain_setup_context()
376 ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr); in ipmmu_domain_setup_context()
377 ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); in ipmmu_domain_setup_context()
384 if (domain->mmu->features->twobit_imttbcr_sl0) in ipmmu_domain_setup_context()
389 if (domain->mmu->features->cache_snoop) in ipmmu_domain_setup_context()
393 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp); in ipmmu_domain_setup_context()
396 ipmmu_ctx_write_root(domain, IMMAIR0, in ipmmu_domain_setup_context()
397 domain->cfg.arm_lpae_s1_cfg.mair); in ipmmu_domain_setup_context()
400 if (domain->mmu->features->setup_imbuscr) in ipmmu_domain_setup_context()
401 ipmmu_ctx_write_root(domain, IMBUSCR, in ipmmu_domain_setup_context()
402 ipmmu_ctx_read_root(domain, IMBUSCR) & in ipmmu_domain_setup_context()
409 ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); in ipmmu_domain_setup_context()
418 ipmmu_ctx_write_all(domain, IMCTR, in ipmmu_domain_setup_context()
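
Context setup programs the hardware from the io-pgtable configuration: the 64-bit TTBR is split across IMTTLBR0/IMTTUBR0, IMTTBCR selects long-descriptor (EAE) format with an SL0 encoding and cacheability attributes gated on feature flags, IMMAIR0 takes the memory attribute indirection value, pending IMSTR bits are cleared, and finally IMCTR enables the MMU plus interrupts on all instances. A condensed sketch of the body (the feature-gated IMBUSCR fixup from lines 400-402 is elided):

	static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
	{
		u64 ttbr;
		u32 tmp;

		/* TTBR0: split the 64-bit table base across two 32-bit registers. */
		ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
		ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
		ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

		/* TTBCR: long descriptors, SL0 and cacheability per features. */
		if (domain->mmu->features->twobit_imttbcr_sl0)
			tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
		else
			tmp = IMTTBCR_SL0_LVL_1;
		if (domain->mmu->features->cache_snoop)
			tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			       IMTTBCR_IRGN0_WB_WA;
		ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

		/* MAIR0: memory attributes from the io-pgtable config. */
		ipmmu_ctx_write_root(domain, IMMAIR0,
				     domain->cfg.arm_lpae_s1_cfg.mair);

		/* IMSTR: writing back the read value clears pending status. */
		ipmmu_ctx_write_root(domain, IMSTR,
				     ipmmu_ctx_read_root(domain, IMSTR));

		/* IMCTR: enable the MMU and fault interrupts everywhere. */
		ipmmu_ctx_write_all(domain, IMCTR,
				    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
	}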
422 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_init_context() argument
437 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; in ipmmu_domain_init_context()
438 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; in ipmmu_domain_init_context()
439 domain->cfg.ias = 32; in ipmmu_domain_init_context()
440 domain->cfg.oas = 40; in ipmmu_domain_init_context()
441 domain->cfg.tlb = &ipmmu_flush_ops; in ipmmu_domain_init_context()
442 domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); in ipmmu_domain_init_context()
443 domain->io_domain.geometry.force_aperture = true; in ipmmu_domain_init_context()
448 domain->cfg.coherent_walk = false; in ipmmu_domain_init_context()
449 domain->cfg.iommu_dev = domain->mmu->root->dev; in ipmmu_domain_init_context()
454 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); in ipmmu_domain_init_context()
458 domain->context_id = ret; in ipmmu_domain_init_context()
460 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, in ipmmu_domain_init_context()
461 domain); in ipmmu_domain_init_context()
462 if (!domain->iop) { in ipmmu_domain_init_context()
463 ipmmu_domain_free_context(domain->mmu->root, in ipmmu_domain_init_context()
464 domain->context_id); in ipmmu_domain_init_context()
468 ipmmu_domain_setup_context(domain); in ipmmu_domain_init_context()
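
Initialization fills in the io_pgtable_cfg (32-bit input addresses, 40-bit output addresses, 4K/2M/1G pages, non-coherent walk delegated to the io-pgtable code), reserves a hardware context on the root IPMMU, allocates ARM 32-bit LPAE stage-1 page-table ops with the domain as cookie, and programs the context registers. On page-table allocation failure the context slot is released again. A sketch of the control flow:

	static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
	{
		int ret;

		domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
		domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
		domain->cfg.ias = 32;
		domain->cfg.oas = 40;
		domain->cfg.tlb = &ipmmu_flush_ops;
		domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
		domain->io_domain.geometry.force_aperture = true;
		/* Cache maintenance is left to the io-pgtable code. */
		domain->cfg.coherent_walk = false;
		domain->cfg.iommu_dev = domain->mmu->root->dev;

		/* Find an unused hardware context on the root IPMMU. */
		ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
		if (ret < 0)
			return ret;

		domain->context_id = ret;

		domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
						   domain);
		if (!domain->iop) {
			ipmmu_domain_free_context(domain->mmu->root,
						  domain->context_id);
			return -EINVAL;
		}

		ipmmu_domain_setup_context(domain);
		return 0;
	}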
472 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_destroy_context() argument
474 if (!domain->mmu) in ipmmu_domain_destroy_context()
483 ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH); in ipmmu_domain_destroy_context()
484 ipmmu_tlb_sync(domain); in ipmmu_domain_destroy_context()
485 ipmmu_domain_free_context(domain->mmu->root, domain->context_id); in ipmmu_domain_destroy_context()
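
Teardown is the reverse: bail out if the domain was never attached, disable the context by writing IMCTR with only the FLUSH bit (clearing MMUEN), wait for the flush to complete, and release the context slot:

	static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
	{
		if (!domain->mmu)
			return;

		/*
		 * Disable the context; the flush must complete before the
		 * slot can be reused.
		 */
		ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
		ipmmu_tlb_sync(domain);
		ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
	}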
492 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_irq() argument
495 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_domain_irq()
499 status = ipmmu_ctx_read_root(domain, IMSTR); in ipmmu_domain_irq()
503 iova = ipmmu_ctx_read_root(domain, IMELAR); in ipmmu_domain_irq()
505 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32; in ipmmu_domain_irq()
513 ipmmu_ctx_write_root(domain, IMSTR, 0); in ipmmu_domain_irq()
532 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
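
The per-domain fault handler reads IMSTR, assembles the fault address from IMELAR (plus IMEUAR on 64-bit), acknowledges the status by writing 0 (this register clears on write-0, unlike the usual write-1-to-clear convention, and the address registers must be read first), and then gives report_iommu_fault() a chance to resolve page and translation faults before logging them as unhandled. A condensed sketch (the rate-limited logging of multi-hit and walk-abort errors is elided):

	static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
	{
		const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
		struct ipmmu_vmsa_device *mmu = domain->mmu;
		unsigned long iova;
		u32 status;

		status = ipmmu_ctx_read_root(domain, IMSTR);
		if (!(status & err_mask))
			return IRQ_NONE;

		iova = ipmmu_ctx_read_root(domain, IMELAR);
		if (IS_ENABLED(CONFIG_64BIT))
			iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

		/* Read the address registers before clearing the status. */
		ipmmu_ctx_write_root(domain, IMSTR, 0);

		if (!(status & (IMSTR_PF | IMSTR_TF)))
			return IRQ_NONE;

		/* Let the upper layer try to handle the fault first. */
		if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
			return IRQ_HANDLED;

		dev_err_ratelimited(mmu->dev,
				    "Unhandled fault: status 0x%08x iova 0x%lx\n",
				    status, iova);
		return IRQ_HANDLED;
	}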
572 struct ipmmu_vmsa_domain *domain; in ipmmu_domain_alloc() local
577 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in ipmmu_domain_alloc()
578 if (!domain) in ipmmu_domain_alloc()
581 mutex_init(&domain->mutex); in ipmmu_domain_alloc()
583 return &domain->io_domain; in ipmmu_domain_alloc()
588 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_domain_free() local
594 ipmmu_domain_destroy_context(domain); in ipmmu_domain_free()
595 free_io_pgtable_ops(domain->iop); in ipmmu_domain_free()
596 kfree(domain); in ipmmu_domain_free()
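
Allocation and free are symmetric: the domain is zero-allocated with its mutex initialized and the embedded iommu_domain handed back to the core; free tears down the hardware context (a no-op for never-attached domains, thanks to the domain->mmu check above), releases the page-table ops, and frees the structure. Sketch:

	static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
	{
		struct ipmmu_vmsa_domain *domain;

		if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
			return NULL;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain)
			return NULL;

		mutex_init(&domain->mutex);

		return &domain->io_domain;
	}

	static void ipmmu_domain_free(struct iommu_domain *io_domain)
	{
		struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

		/* All devices are assumed to have been detached already. */
		ipmmu_domain_destroy_context(domain);
		free_io_pgtable_ops(domain->iop);
		kfree(domain);
	}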
604 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_attach_device() local
613 mutex_lock(&domain->mutex); in ipmmu_attach_device()
615 if (!domain->mmu) { in ipmmu_attach_device()
617 domain->mmu = mmu; in ipmmu_attach_device()
618 ret = ipmmu_domain_init_context(domain); in ipmmu_attach_device()
621 domain->mmu = NULL; in ipmmu_attach_device()
624 domain->context_id); in ipmmu_attach_device()
626 } else if (domain->mmu != mmu) { in ipmmu_attach_device()
632 dev_name(mmu->dev), dev_name(domain->mmu->dev)); in ipmmu_attach_device()
635 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); in ipmmu_attach_device()
637 mutex_unlock(&domain->mutex); in ipmmu_attach_device()
643 ipmmu_utlb_enable(domain, fwspec->ids[i]); in ipmmu_attach_device()
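
Attach serializes on the domain mutex. The first attach binds the domain to this IPMMU and initializes the hardware context (rolling domain->mmu back to NULL on failure); a later attach through a different IPMMU instance is rejected, while re-attach through the same one reuses the existing context. Only after the mutex is dropped are the device's micro-TLBs, identified by its fwspec IDs, pointed at the context. A sketch, assuming the driver's to_ipmmu() device-lookup helper:

	static int ipmmu_attach_device(struct iommu_domain *io_domain,
				       struct device *dev)
	{
		struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
		struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
		struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
		unsigned int i;
		int ret = 0;

		if (!mmu) {
			dev_err(dev, "Cannot attach to IPMMU\n");
			return -ENXIO;
		}

		mutex_lock(&domain->mutex);

		if (!domain->mmu) {
			/* First device in the domain: set up a context. */
			domain->mmu = mmu;
			ret = ipmmu_domain_init_context(domain);
			if (ret < 0) {
				dev_err(dev, "Unable to initialize IPMMU context\n");
				domain->mmu = NULL;
			} else {
				dev_info(dev, "Using IPMMU context %u\n",
					 domain->context_id);
			}
		} else if (domain->mmu != mmu) {
			/* Devices behind different IPMMUs can't share a domain. */
			dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
				dev_name(mmu->dev), dev_name(domain->mmu->dev));
			ret = -EINVAL;
		} else {
			dev_info(dev, "Reusing IPMMU context %u\n",
				 domain->context_id);
		}

		mutex_unlock(&domain->mutex);

		if (ret < 0)
			return ret;

		for (i = 0; i < fwspec->num_ids; ++i)
			ipmmu_utlb_enable(domain, fwspec->ids[i]);

		return 0;
	}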
652 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_detach_device() local
656 ipmmu_utlb_disable(domain, fwspec->ids[i]); in ipmmu_detach_device()
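
Detach is the mirror image, simply disabling every micro-TLB the device owns; the hardware context itself stays allocated until the domain is freed:

	static void ipmmu_detach_device(struct iommu_domain *io_domain,
					struct device *dev)
	{
		struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
		struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
		unsigned int i;

		for (i = 0; i < fwspec->num_ids; ++i)
			ipmmu_utlb_disable(domain, fwspec->ids[i]);
	}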
666 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_map() local
668 if (!domain) in ipmmu_map()
671 return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp); in ipmmu_map()
677 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_unmap() local
679 return domain->iop->unmap(domain->iop, iova, size, gather); in ipmmu_unmap()
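
map and unmap are thin wrappers that delegate straight to the io-pgtable ops allocated in init_context; TLB maintenance for unmap happens through the gather/flush-ops machinery rather than here. Sketch:

	static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
	{
		struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

		if (!domain)
			return -ENODEV;

		return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
	}

	static size_t ipmmu_unmap(struct iommu_domain *io_domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *gather)
	{
		struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

		return domain->iop->unmap(domain->iop, iova, size, gather);
	}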
684 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_flush_iotlb_all() local
686 if (domain->mmu) in ipmmu_flush_iotlb_all()
687 ipmmu_tlb_flush_all(domain); in ipmmu_flush_iotlb_all()
699 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_iova_to_phys() local
703 return domain->iop->iova_to_phys(domain->iop, iova); in ipmmu_iova_to_phys()
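
The remaining ops follow the same pattern: flush_iotlb_all only touches hardware when the domain has been attached (domain->mmu set), and iova_to_phys walks the page tables through the io-pgtable ops:

	static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
	{
		struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

		/* Nothing to flush for a domain that was never attached. */
		if (domain->mmu)
			ipmmu_tlb_flush_all(domain);
	}

	static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
					      dma_addr_t iova)
	{
		struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

		return domain->iop->iova_to_phys(domain->iop, iova);
	}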