
Searched refs:mmu (Results 1 – 25 of 2266) sorted by relevance


/linux/drivers/staging/media/ipu3/
ipu3-mmu.c
89 func(mmu); in call_if_imgu_is_powered()
217 if (!mmu) in __imgu_mmu_map()
346 if (!mmu) in __imgu_mmu_unmap()
432 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); in imgu_mmu_init()
433 if (!mmu) in imgu_mmu_init()
467 mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts)); in imgu_mmu_init()
468 if (!mmu->l2pts) in imgu_mmu_init()
472 mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval); in imgu_mmu_init()
473 if (!mmu->l1pt) in imgu_mmu_init()
493 kfree(mmu); in imgu_mmu_init()
[all …]
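
The imgu_mmu_init() hits above trace the usual kernel allocate-and-unwind shape: the context struct, then the array of L2 table pointers, then the L1 table, with each failure freeing whatever came before. A minimal userspace sketch of that shape, where struct toy_mmu, N_L2_TABLES, and all sizes are invented stand-ins rather than the driver's real layout:

#include <stdlib.h>

#define N_L2_TABLES 1024                /* stand-in for IPU3_PT_PTES */
#define PT_ENTRIES  256                 /* invented page-table size */

struct toy_mmu {
	unsigned int **l2pts;           /* lazily populated L2 tables */
	unsigned int *l1pt;             /* single L1 table */
};

struct toy_mmu *toy_mmu_init(void)
{
	struct toy_mmu *mmu = calloc(1, sizeof(*mmu));         /* ~kzalloc */

	if (!mmu)
		return NULL;

	mmu->l2pts = calloc(N_L2_TABLES, sizeof(*mmu->l2pts)); /* ~vzalloc */
	if (!mmu->l2pts)
		goto free_mmu;

	mmu->l1pt = calloc(PT_ENTRIES, sizeof(*mmu->l1pt));
	if (!mmu->l1pt)
		goto free_l2pts;

	return mmu;

free_l2pts:
	free(mmu->l2pts);
free_mmu:
	free(mmu);
	return NULL;
}
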
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
258 mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type; in nvkm_mmu_type()
259 mmu->type[mmu->type_nr].heap = heap; in nvkm_mmu_type()
260 mmu->type_nr++; in nvkm_mmu_type()
268 if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) { in nvkm_mmu_heap()
269 mmu->heap[mmu->heap_nr].type = type; in nvkm_mmu_heap()
270 mmu->heap[mmu->heap_nr].size = size; in nvkm_mmu_heap()
373 if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram) in nvkm_mmu_oneinit()
393 mmu->func->init(mmu); in nvkm_mmu_init()
406 return mmu; in nvkm_mmu_dtor()
421 mmu->func = func; in nvkm_mmu_ctor()
[all …]
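
nvkm_mmu_type()/nvkm_mmu_heap() above append into fixed-size arrays behind a WARN_ON bounds check and simply skip the append when the table is full. A compilable sketch of that registry idiom; MAX_HEAPS, struct toy_heap, and the field types are assumptions, and a plain bounds test stands in for WARN_ON:

#include <stdint.h>

#define MAX_HEAPS 4

struct toy_heap { uint8_t type; uint64_t size; };

static struct toy_heap heaps[MAX_HEAPS];
static int heap_nr;

/* Register a heap; returns its index, or -1 when the table is full
 * (the kernel version WARN_ONs and skips instead). */
static int toy_mmu_heap(uint8_t type, uint64_t size)
{
	if (heap_nr == MAX_HEAPS)
		return -1;
	heaps[heap_nr].type = type;
	heaps[heap_nr].size = size;
	return heap_nr++;
}
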
Kbuild
2 nvkm-y += nvkm/subdev/mmu/base.o
3 nvkm-y += nvkm/subdev/mmu/nv04.o
4 nvkm-y += nvkm/subdev/mmu/nv41.o
5 nvkm-y += nvkm/subdev/mmu/nv44.o
6 nvkm-y += nvkm/subdev/mmu/nv50.o
7 nvkm-y += nvkm/subdev/mmu/g84.o
8 nvkm-y += nvkm/subdev/mmu/mcp77.o
9 nvkm-y += nvkm/subdev/mmu/gf100.o
10 nvkm-y += nvkm/subdev/mmu/gk104.o
11 nvkm-y += nvkm/subdev/mmu/gk20a.o
[all …]
ummu.c
35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
59 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_heap() local
79 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_type() local
89 type = mmu->type[index].type; in nvkm_ummu_type()
108 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_kind() local
116 if (mmu->func->kind) in nvkm_ummu_kind()
117 kind = mmu->func->kind(mmu, &count, &kind_inv); in nvkm_ummu_kind()
159 struct nvkm_mmu *mmu = device->mmu; in nvkm_ummu_new() local
164 if (mmu->func->kind) in nvkm_ummu_new()
165 mmu->func->kind(mmu, &kinds, &unused); in nvkm_ummu_new()
[all …]
umem.c
72 struct nvkm_device *device = umem->mmu->subdev.device; in nvkm_umem_unmap()
90 struct nvkm_mmu *mmu = umem->mmu; in nvkm_umem_map() local
109 int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc, in nvkm_umem_map()
145 struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu; in nvkm_umem_new() local
161 if (type >= mmu->type_nr) in nvkm_umem_new()
167 umem->mmu = mmu; in nvkm_umem_new()
168 umem->type = mmu->type[type].type; in nvkm_umem_new()
172 if (mmu->type[type].type & NVKM_MEM_MAPPABLE) { in nvkm_umem_new()
177 ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc, in nvkm_umem_new()
mem.c
33 struct nvkm_mmu *mmu; member
88 dma_unmap_page(mem->mmu->subdev.device->dev, in nvkm_mem_dtor()
147 struct device *dev = mmu->subdev.device->dev; in nvkm_mem_new_host()
157 if ( (mmu->type[type].type & NVKM_MEM_COHERENT) && in nvkm_mem_new_host()
158 !(mmu->type[type].type & NVKM_MEM_UNCACHED)) in nvkm_mem_new_host()
169 mem->mmu = mmu; in nvkm_mem_new_host()
199 if (mmu->dma_bits > 32) in nvkm_mem_new_host()
209 mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev, in nvkm_mem_new_host()
230 if (mmu->type[type].type & NVKM_MEM_VRAM) { in nvkm_mem_new_type()
231 ret = mmu->func->mem.vram(mmu, type, page, size, in nvkm_mem_new_type()
[all …]
/linux/drivers/staging/media/atomisp/pci/mmu/
isp_mmu.c
82 return mmu->driver->pte_to_phys(mmu, pte); in isp_pte_to_pgaddr()
88 unsigned int pte = mmu->driver->phys_to_pte(mmu, phys); in isp_pgaddr_to_pte_valid()
325 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { in mmu_map()
340 mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt); in mmu_map()
344 l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); in mmu_map()
455 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { in mmu_unmap()
456 mmu_unmap_l1_pt_error(mmu, mmu->l1_pte); in mmu_unmap()
461 l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); in mmu_unmap()
545 if (!mmu) in isp_mmu_exit()
548 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { in isp_mmu_exit()
[all …]
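
The isp_mmu.c hits show every PTE<->physical-address conversion funneled through per-chip driver callbacks instead of open-coded shifts. A self-contained model of that indirection; the toy_* names and the 12-bit page shift are assumptions, not the atomisp driver's actual values:

#include <stdint.h>

struct toy_isp_mmu;

struct toy_isp_mmu_driver {
	uint32_t (*phys_to_pte)(struct toy_isp_mmu *mmu, uint64_t phys);
	uint64_t (*pte_to_phys)(struct toy_isp_mmu *mmu, uint32_t pte);
};

struct toy_isp_mmu {
	const struct toy_isp_mmu_driver *driver;
	uint32_t l1_pte;                /* root-table PTE, as above */
};

/* One plausible chip backend: the PTE is just the page frame number. */
static uint32_t pfn_phys_to_pte(struct toy_isp_mmu *mmu, uint64_t phys)
{
	(void)mmu;
	return (uint32_t)(phys >> 12);
}

static uint64_t pfn_pte_to_phys(struct toy_isp_mmu *mmu, uint32_t pte)
{
	(void)mmu;
	return (uint64_t)pte << 12;
}

static const struct toy_isp_mmu_driver pfn_driver = {
	.phys_to_pte = pfn_phys_to_pte,
	.pte_to_phys = pfn_pte_to_phys,
};
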
/linux/drivers/gpu/drm/nouveau/nvif/
mmu.c
30 kfree(mmu->kind); in nvif_mmu_dtor()
31 kfree(mmu->type); in nvif_mmu_dtor()
32 kfree(mmu->heap); in nvif_mmu_dtor()
50 mmu->heap = NULL; in nvif_mmu_ctor()
51 mmu->type = NULL; in nvif_mmu_ctor()
52 mmu->kind = NULL; in nvif_mmu_ctor()
69 mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap), in nvif_mmu_ctor()
71 mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type), in nvif_mmu_ctor()
73 if (ret = -ENOMEM, !mmu->heap || !mmu->type) in nvif_mmu_ctor()
76 mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind), in nvif_mmu_ctor()
[all …]
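
The ctor/dtor pair above works because all three pointers start out NULL, so one destructor can free whatever subset of the allocations succeeded (kfree(NULL), like free(NULL), is a no-op). A compact userspace model; the struct layout and element sizes are made up:

#include <stdlib.h>

struct toy_nvif_mmu {
	void *heap, *type, *kind;
	int heap_nr, type_nr, kind_nr;
};

static void toy_nvif_mmu_dtor(struct toy_nvif_mmu *mmu)
{
	free(mmu->kind);        /* safe even if this step never ran */
	free(mmu->type);
	free(mmu->heap);
}

static int toy_nvif_mmu_ctor(struct toy_nvif_mmu *mmu)
{
	mmu->heap = mmu->type = mmu->kind = NULL;
	mmu->heap_nr = mmu->type_nr = mmu->kind_nr = 2;

	mmu->heap = calloc(mmu->heap_nr, 16);
	mmu->type = calloc(mmu->type_nr, 16);
	if (!mmu->heap || !mmu->type)
		goto fail;
	mmu->kind = calloc(mmu->kind_nr, 1);
	if (mmu->kind)
		return 0;
fail:
	toy_nvif_mmu_dtor(mmu);   /* frees only what was allocated */
	return -1;
}
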
mem.c
28 nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size, in nvif_mem_ctor_map() argument
31 int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type, in nvif_mem_ctor_map()
48 nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass, in nvif_mem_ctor_type() argument
72 ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass, in nvif_mem_ctor_type()
75 mem->type = mmu->type[type].type; in nvif_mem_ctor_type()
88 nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type, in nvif_mem_ctor() argument
95 for (i = 0; ret && i < mmu->type_nr; i++) { in nvif_mem_ctor()
96 if ((mmu->type[i].type & type) == type) { in nvif_mem_ctor()
97 ret = nvif_mem_ctor_type(mmu, name, oclass, i, page, in nvif_mem_ctor()
/linux/drivers/iommu/
ipmmu-vmsa.c
152 return mmu->root == mmu; in ipmmu_is_root()
227 if (domain->mmu != domain->mmu->root) in ipmmu_ctx_write_all()
287 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable() local
308 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_disable() local
344 ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); in ipmmu_domain_allocate_context()
495 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_domain_irq() local
617 domain->mmu = mmu; in ipmmu_attach_device()
626 } else if (domain->mmu != mmu) { in ipmmu_attach_device()
632 dev_name(mmu->dev), dev_name(domain->mmu->dev)); in ipmmu_attach_device()
832 if (!mmu) in ipmmu_probe_device()
[all …]
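
ipmmu_domain_allocate_context() above hands out hardware context slots with find_first_zero_bit(). The same first-free-slot scan over a plain bit mask, with NUM_CTX invented (the real limit comes from mmu->num_ctx):

#define NUM_CTX 8

static unsigned long ctx_bitmap;        /* bit n set => context n in use */

/* Return a free context index, or -1 when all NUM_CTX are taken. */
static int toy_ctx_alloc(void)
{
	for (int i = 0; i < NUM_CTX; i++) {
		if (!(ctx_bitmap & (1UL << i))) {
			ctx_bitmap |= 1UL << i;
			return i;
		}
	}
	return -1;
}

static void toy_ctx_free(int i)
{
	ctx_bitmap &= ~(1UL << i);
}
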
/linux/arch/x86/kernel/
paravirt.c
336 .mmu.tlb_remove_table =
368 .mmu.pmd_val = PTE_IDENT,
369 .mmu.make_pmd = PTE_IDENT,
371 .mmu.pud_val = PTE_IDENT,
372 .mmu.make_pud = PTE_IDENT,
377 .mmu.p4d_val = PTE_IDENT,
378 .mmu.make_p4d = PTE_IDENT,
383 .mmu.pte_val = PTE_IDENT,
384 .mmu.pgd_val = PTE_IDENT,
386 .mmu.make_pte = PTE_IDENT,
[all …]
/linux/drivers/staging/media/atomisp/include/mmu/
isp_mmu.h
100 void (*tlb_flush_range)(struct isp_mmu *mmu,
102 void (*tlb_flush_all)(struct isp_mmu *mmu);
120 #define ISP_PTE_VALID_MASK(mmu) \ argument
121 ((mmu)->driver->pte_valid_mask)
123 #define ISP_PTE_VALID(mmu, pte) \ argument
124 ((pte) & ISP_PTE_VALID_MASK(mmu))
136 void isp_mmu_exit(struct isp_mmu *mmu);
156 if (mmu->driver && mmu->driver->tlb_flush_all) in isp_mmu_flush_tlb_all()
157 mmu->driver->tlb_flush_all(mmu); in isp_mmu_flush_tlb_all()
165 if (mmu->driver && mmu->driver->tlb_flush_range) in isp_mmu_flush_tlb_range()
[all …]
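
The flush helpers above only call into the backend when the hook exists, so chips without a range flush simply fall through. A minimal sketch of that guarded optional-callback dispatch, with invented toy_* names:

struct toy_flush_mmu;

struct toy_flush_driver {
	void (*tlb_flush_all)(struct toy_flush_mmu *mmu);   /* may be NULL */
};

struct toy_flush_mmu {
	const struct toy_flush_driver *driver;
};

static void toy_flush_tlb_all(struct toy_flush_mmu *mmu)
{
	/* Tolerate a missing driver and a missing hook, exactly as
	 * isp_mmu_flush_tlb_all() does above. */
	if (mmu->driver && mmu->driver->tlb_flush_all)
		mmu->driver->tlb_flush_all(mmu);
}
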
/linux/drivers/gpu/drm/panfrost/
panfrost_mmu.c
164 as = mmu->as; in panfrost_mmu_as_get()
210 mmu->as = as; in panfrost_mmu_as_get()
240 mmu->as = -1; in panfrost_mmu_reset()
263 if (mmu->as < 0) in panfrost_mmu_flush_range()
545 kfree(mmu); in panfrost_mmu_release_ctx()
557 return mmu; in panfrost_mmu_ctx_get()
590 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); in panfrost_mmu_ctx_create()
591 if (!mmu) in panfrost_mmu_ctx_create()
602 mmu->as = -1; in panfrost_mmu_ctx_create()
616 kfree(mmu); in panfrost_mmu_ctx_create()
[all …]
/linux/drivers/gpu/drm/msm/
msm_mmu.h
13 void (*detach)(struct msm_mmu *mmu);
14 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
16 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
17 void (*destroy)(struct msm_mmu *mmu);
18 void (*resume_translation)(struct msm_mmu *mmu);
38 mmu->dev = dev; in msm_mmu_init()
39 mmu->funcs = funcs; in msm_mmu_init()
40 mmu->type = type; in msm_mmu_init()
49 mmu->arg = arg; in msm_mmu_set_fault_handler()
50 mmu->handler = handler; in msm_mmu_set_fault_handler()
[all …]
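
msm_mmu.h is the interface half of an ops-table design: each backend (IOMMU, GPUMMU) fills a funcs struct and the init helper wires it to the handle. A compilable sketch of that shape with simplified signatures; everything prefixed toy_ or noop_ is invented:

#include <stdint.h>
#include <stddef.h>

struct toy_msm_mmu;

struct toy_msm_mmu_funcs {
	void (*detach)(struct toy_msm_mmu *mmu);
	int (*map)(struct toy_msm_mmu *mmu, uint64_t iova, size_t len);
	int (*unmap)(struct toy_msm_mmu *mmu, uint64_t iova, size_t len);
	void (*destroy)(struct toy_msm_mmu *mmu);
};

struct toy_msm_mmu {
	const struct toy_msm_mmu_funcs *funcs;
	int type;
};

static void toy_msm_mmu_init(struct toy_msm_mmu *mmu,
			     const struct toy_msm_mmu_funcs *funcs, int type)
{
	mmu->funcs = funcs;
	mmu->type = type;
}

/* A do-nothing backend, enough to show how funcs get populated. */
static int noop_map(struct toy_msm_mmu *mmu, uint64_t iova, size_t len)
{
	(void)mmu; (void)iova; (void)len;
	return 0;
}

static const struct toy_msm_mmu_funcs noop_funcs = {
	.map = noop_map,
};

Callers then go through mmu->funcs->map(...) without caring which backend sits behind the handle.
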
msm_iommu.c
68 msm_iommu_pagetable_unmap(mmu, iova, mapped); in msm_iommu_pagetable_map()
100 int msm_iommu_pagetable_params(struct msm_mmu *mmu, in msm_iommu_pagetable_params() argument
105 if (mmu->type != MSM_MMU_IOMMU_PAGETABLE) in msm_iommu_pagetable_params()
108 pagetable = to_pagetable(mmu); in msm_iommu_pagetable_params()
249 static void msm_iommu_detach(struct msm_mmu *mmu) in msm_iommu_detach() argument
251 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_detach()
253 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
259 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_map()
274 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_unmap()
284 static void msm_iommu_destroy(struct msm_mmu *mmu) in msm_iommu_destroy() argument
[all …]
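
The first msm_iommu.c hit is the rollback idiom for a multi-chunk map: if any chunk fails, unmap exactly the mapped bytes that already succeeded so the caller never sees a half-mapped range. A self-contained model; the 4 KiB chunk size and the injected failure are fabrications for the demo:

#include <stddef.h>
#include <stdint.h>

static int toy_map_one(uint64_t iova)
{
	return iova == 0x3000 ? -1 : 0;   /* pretend one chunk fails */
}

static void toy_unmap(uint64_t iova, size_t len)
{
	(void)iova; (void)len;            /* would tear down PTEs here */
}

static int toy_map_range(uint64_t iova, size_t len)
{
	size_t mapped = 0;

	while (mapped < len) {
		if (toy_map_one(iova + mapped) < 0) {
			toy_unmap(iova, mapped);  /* roll back partial work */
			return -1;
		}
		mapped += 0x1000;
	}
	return 0;
}
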
msm_gem_vma.c
18 if (aspace->mmu) in msm_gem_address_space_destroy()
19 aspace->mmu->funcs->destroy(aspace->mmu); in msm_gem_address_space_destroy()
54 if (aspace->mmu) in msm_gem_purge_vma()
55 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); in msm_gem_purge_vma()
87 if (aspace && aspace->mmu) in msm_gem_map_vma()
88 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, in msm_gem_map_vma()
143 msm_gem_address_space_create(struct msm_mmu *mmu, const char *name, in msm_gem_address_space_create() argument
148 if (IS_ERR(mmu)) in msm_gem_address_space_create()
149 return ERR_CAST(mmu); in msm_gem_address_space_create()
157 aspace->mmu = mmu; in msm_gem_address_space_create()
msm_gpummu.c
24 static void msm_gpummu_detach(struct msm_mmu *mmu) in msm_gpummu_detach() argument
28 static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, in msm_gpummu_map() argument
31 struct msm_gpummu *gpummu = to_msm_gpummu(mmu); in msm_gpummu_map()
56 static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) in msm_gpummu_unmap() argument
58 struct msm_gpummu *gpummu = to_msm_gpummu(mmu); in msm_gpummu_unmap()
71 static void msm_gpummu_resume_translation(struct msm_mmu *mmu) in msm_gpummu_resume_translation() argument
75 static void msm_gpummu_destroy(struct msm_mmu *mmu) in msm_gpummu_destroy() argument
77 struct msm_gpummu *gpummu = to_msm_gpummu(mmu); in msm_gpummu_destroy()
79 dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base, in msm_gpummu_destroy()
114 void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, in msm_gpummu_params() argument
[all …]
/linux/arch/arc/mm/
tlb.c
134 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; in local_flush_tlb_all() local
137 int num_tlb = mmu->sets * mmu->ways; in local_flush_tlb_all()
565 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; in read_decode_mmu_bcr() local
589 mmu->ver = (tmp >> 24); in read_decode_mmu_bcr()
596 mmu->u_dtlb = mmu3->u_dtlb; in read_decode_mmu_bcr()
598 mmu->sasid = mmu3->sasid; in read_decode_mmu_bcr()
607 mmu->sasid = mmu4->sasid; in read_decode_mmu_bcr()
640 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; in arc_mmu_init() local
707 #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way)) argument
722 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; in do_tlb_overlap_fault() local
[all …]
/linux/arch/x86/include/asm/
paravirt.h
71 PVOP_VCALL0(mmu.flush_tlb_user); in __flush_tlb_local()
76 PVOP_VCALL0(mmu.flush_tlb_kernel); in __flush_tlb_global()
97 PVOP_VCALL1(mmu.exit_mmap, mm); in paravirt_arch_exit_mmap()
151 PVOP_VCALL1(mmu.write_cr2, x); in write_cr2()
162 PVOP_ALT_VCALL1(mmu.write_cr3, x, in write_cr3()
363 PVOP_VCALL1(mmu.release_pte, pfn); in paravirt_release_pte()
373 PVOP_VCALL1(mmu.release_pmd, pfn); in paravirt_release_pmd()
382 PVOP_VCALL1(mmu.release_pud, pfn); in paravirt_release_pud()
566 PVOP_VCALL0(mmu.lazy_mode.enter); in arch_enter_lazy_mmu_mode()
571 PVOP_VCALL0(mmu.lazy_mode.leave); in arch_leave_lazy_mmu_mode()
[all …]
/linux/arch/arm64/kvm/hyp/nvhe/
tlb.c
17 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, in __tlb_switch_to_guest() argument
42 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in __tlb_switch_to_guest()
58 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
66 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa()
112 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
119 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid()
128 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) in __kvm_flush_cpu_context() argument
133 __tlb_switch_to_guest(mmu, &cxt); in __kvm_flush_cpu_context()
/linux/arch/arm64/kvm/hyp/vhe/
tlb.c
19 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, in __tlb_switch_to_guest() argument
56 __load_stage2(mmu, mmu->arch); in __tlb_switch_to_guest()
82 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
90 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa()
114 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
121 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid()
130 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) in __kvm_flush_cpu_context() argument
135 __tlb_switch_to_guest(mmu, &cxt); in __kvm_flush_cpu_context()
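
Both the nVHE and VHE tlb.c variants above bracket every operation the same way: save the host context, load the guest's stage-2 tables, do the TLB maintenance, restore. A hardware-free model of that save/switch/restore discipline; the vttbr field and the empty flush body are placeholders for the real register writes and TLBI instructions:

struct toy_s2_mmu { unsigned long vttbr; };        /* guest stage-2 root */
struct toy_tlb_ctx { unsigned long saved_vttbr; };

static unsigned long current_vttbr;                /* models the CPU register */

static void toy_switch_to_guest(struct toy_s2_mmu *mmu,
				struct toy_tlb_ctx *cxt)
{
	cxt->saved_vttbr = current_vttbr;          /* save host context */
	current_vttbr = mmu->vttbr;                /* load guest stage-2 */
}

static void toy_switch_to_host(struct toy_tlb_ctx *cxt)
{
	current_vttbr = cxt->saved_vttbr;
}

static void toy_tlb_flush_vmid(struct toy_s2_mmu *mmu)
{
	struct toy_tlb_ctx cxt;

	toy_switch_to_guest(mmu, &cxt);
	/* ... TLBI and barriers would go here ... */
	toy_switch_to_host(&cxt);
}
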
/linux/arch/x86/kvm/mmu/
mmu.c
3363 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_direct_roots() local
3394 mmu->root_hpa = __pa(mmu->pae_root); in mmu_alloc_direct_roots()
3471 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_shadow_roots() local
3565 mmu->root_hpa = __pa(mmu->pml5_root); in mmu_alloc_shadow_roots()
3567 mmu->root_hpa = __pa(mmu->pml4_root); in mmu_alloc_shadow_roots()
3569 mmu->root_hpa = __pa(mmu->pae_root); in mmu_alloc_shadow_roots()
3581 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_special_roots() local
3604 if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root)) in mmu_alloc_special_roots()
4157 struct kvm_mmu *mmu = vcpu->arch.mmu; in cached_root_available() local
4181 struct kvm_mmu *mmu = vcpu->arch.mmu; in fast_pgd_switch() local
[all …]
paging_tmpl.h
34 #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
54 #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
67 #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) argument
110 if (!PT_HAVE_ACCESSED_DIRTY(mmu)) in FNAME()
236 struct kvm_mmu *mmu, in FNAME()
247 if (!PT_HAVE_ACCESSED_DIRTY(mmu)) in FNAME()
364 walker->level = mmu->root_level; in FNAME()
365 pte = mmu->get_guest_pgd(vcpu); in FNAME()
503 if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu))) in FNAME()
670 top_level = vcpu->arch.mmu->root_level; in FNAME()
[all …]
/linux/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
39 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) in nvif_mmu_kind_valid() argument
42 if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) in nvif_mmu_kind_valid()
49 nvif_mmu_type(struct nvif_mmu *mmu, u8 mask) in nvif_mmu_type() argument
52 for (i = 0; i < mmu->type_nr; i++) { in nvif_mmu_type()
53 if ((mmu->type[i].type & mask) == mask) in nvif_mmu_type()
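
nvif_mmu_type() above is a first-fit capability search: return the first memory type whose flag bits cover the whole requested mask. A standalone rendition; the TOY_MEM_* flags and the table contents are invented:

#define TOY_MEM_VRAM     0x01
#define TOY_MEM_HOST     0x02
#define TOY_MEM_MAPPABLE 0x04

static const unsigned char toy_types[] = {
	TOY_MEM_VRAM,                            /* fast, not CPU-visible */
	TOY_MEM_VRAM | TOY_MEM_MAPPABLE,
	TOY_MEM_HOST | TOY_MEM_MAPPABLE,
};

/* First index whose bits cover `mask`, or -1 if none qualifies. */
static int toy_mmu_type(unsigned char mask)
{
	for (int i = 0; i < (int)(sizeof(toy_types) / sizeof(toy_types[0])); i++) {
		if ((toy_types[i] & mask) == mask)
			return i;
	}
	return -1;
}

Here toy_mmu_type(TOY_MEM_MAPPABLE) returns 1: plain VRAM lacks the bit, so the search falls through to the first mappable type.
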
/linux/drivers/gpu/drm/nouveau/
nouveau_mem.c
93 struct nvif_mmu *mmu = &cli->mmu; in nouveau_mem_host() local
103 if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND)) in nouveau_mem_host()
105 if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) { in nouveau_mem_host()
106 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) in nouveau_mem_host()
107 mem->kind = mmu->kind[mem->kind]; in nouveau_mem_host()
117 ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT, in nouveau_mem_host()
130 struct nvif_mmu *mmu = &cli->mmu; in nouveau_mem_vram() local
137 ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass, in nouveau_mem_vram()
145 ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass, in nouveau_mem_vram()
148 .bankswz = mmu->kind[mem->kind] == 2, in nouveau_mem_vram()
