Lines matching refs: pgd
365 __visible pgdval_t xen_pgd_val(pgd_t pgd) in xen_pgd_val() argument
367 return pte_mfn_to_pfn(pgd.pgd); in xen_pgd_val()
379 __visible pgd_t xen_make_pgd(pgdval_t pgd) in xen_make_pgd() argument
381 pgd = pte_pfn_to_mfn(pgd); in xen_make_pgd()
382 return native_make_pgd(pgd); in xen_make_pgd()
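The two conversion helpers above appear almost in full. In a PV guest the hardware pagetables hold machine frame numbers (MFNs), while the kernel reasons in pseudo-physical frames (PFNs), so reading a pgd translates MFN to PFN and constructing one translates back. A sketch of the pair as reconstructed from the listing (the pte_mfn_to_pfn()/pte_pfn_to_mfn() helpers are defined earlier in the same file):

    /* Reading a pgd: translate the machine frame in the entry back to a
     * pseudo-physical frame before handing it to generic code. */
    __visible pgdval_t xen_pgd_val(pgd_t pgd)
    {
        return pte_mfn_to_pfn(pgd.pgd);
    }

    /* Writing a pgd: translate the pseudo-physical frame to the machine
     * frame Xen expects, then build a native entry from it. */
    __visible pgd_t xen_make_pgd(pgdval_t pgd)
    {
        pgd = pte_pfn_to_mfn(pgd);
        return native_make_pgd(pgd);
    }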
445 static pgd_t *xen_get_user_pgd(pgd_t *pgd) in xen_get_user_pgd() argument
447 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); in xen_get_user_pgd()
448 unsigned offset = pgd - pgd_page; in xen_get_user_pgd()
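Only the first two statements of xen_get_user_pgd() are shown: they recover the page-aligned base of the pgd page and the entry's offset within it. On 64-bit PV each address space carries a second, user-mode top-level table, and this helper finds the matching slot in it. A sketch of the likely remainder, assuming the user pgd pointer is stashed in page->private (consistent with the page handling visible in xen_pgd_alloc() further down):

    static pgd_t *xen_get_user_pgd(pgd_t *pgd)
    {
        pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
        unsigned offset = pgd - pgd_page;
        pgd_t *user_ptr = NULL;

        /* Only entries below USER_LIMIT have a user-mode counterpart. */
        if (offset < pgd_index(USER_LIMIT)) {
            struct page *page = virt_to_page(pgd_page);

            /* Assumption: the user pgd page is parked in page->private. */
            user_ptr = (pgd_t *)page->private;
            if (user_ptr)
                user_ptr += offset;
        }

        return user_ptr;
    }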
503 pgd_val.pgd = p4d_val_ma(val); in xen_set_p4d()
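Line 503 is a fragment of xen_set_p4d(): a write to a kernel p4d entry must be mirrored into the corresponding slot of the user-mode top level, and that slot is typed as a pgd. A sketch of the unpinned fast path around the assignment (the pinned path goes through a hypercall batch instead; __xen_set_p4d_hyper() is an assumed name for that helper):

    static void xen_set_p4d(p4d_t *ptr, p4d_t val)
    {
        pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
        pgd_t pgd_val;

        /* Unpinned tables are ordinary memory; write them directly. */
        if (!xen_page_pinned(ptr)) {
            *ptr = val;
            if (user_ptr) {
                /* Mirror the new entry into the user-mode top level. */
                pgd_val.pgd = p4d_val_ma(val);
                *user_ptr = pgd_val;
            }
            return;
        }

        /* Pinned tables must be updated via the hypervisor. */
        __xen_set_p4d_hyper(ptr, val);
    }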
600 static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, in __xen_pgd_walk() argument
626 if (pgd_none(pgd[i])) in __xen_pgd_walk()
629 p4d = p4d_offset(&pgd[i], 0); in __xen_pgd_walk()
635 (*func)(mm, virt_to_page(pgd), PT_PGD); in __xen_pgd_walk()
643 __xen_pgd_walk(mm, mm->pgd, func, limit); in xen_pgd_walk()
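Lines 600-643 outline the generic walker used by both pinning and unpinning: iterate over the top-level entries up to a limit, descend into each present one, and apply the callback to the top-level page itself last, so callers can treat that as an end-of-walk cue. A sketch of the loop, assuming a companion xen_p4d_walk() for the lower levels and omitting the guard-hole skip the real walker performs on 64-bit:

    static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                               void (*func)(struct mm_struct *mm,
                                            struct page *page,
                                            enum pt_level level),
                               unsigned long limit)
    {
        int i, nr;

        /* limit is the last byte to be touched. */
        nr = pgd_index(limit - 1) + 1;
        for (i = 0; i < nr; i++) {
            p4d_t *p4d;

            if (pgd_none(pgd[i]))
                continue;

            /* Descend; xen_p4d_walk() recurses through p4d/pud/pmd. */
            p4d = p4d_offset(&pgd[i], 0);
            xen_p4d_walk(mm, p4d, func);
        }

        /* Visit the top level last so callbacks can use it as a cue for
         * final work such as TLB flushes. */
        (*func)(mm, virt_to_page(pgd), PT_PGD);
    }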
728 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_pin() argument
730 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_pin()
732 trace_xen_mmu_pgd_pin(mm, pgd); in __xen_pgd_pin()
736 __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT); in __xen_pgd_pin()
738 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_pin()
751 __xen_pgd_pin(mm, mm->pgd); in xen_pgd_pin()
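Pinning (lines 728-751) registers a pagetable with Xen: every page reachable from the root is first made read-only by the xen_pin_page callback, and only then does MMUEXT_PIN_L4_TABLE hand the root to the hypervisor, which validates the whole tree. A sketch, assuming the multicall batching helpers xen_mc_batch()/xen_mc_issue() used throughout this file:

    static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
    {
        pgd_t *user_pgd = xen_get_user_pgd(pgd);

        trace_xen_mmu_pgd_pin(mm, pgd);

        xen_mc_batch();

        /* Make every pagetable page read-only before pinning the root. */
        __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);

        xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

        /* The user-mode top level, if any, is pinned separately. */
        if (user_pgd) {
            xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
            xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
        }

        xen_mc_issue(0);
    }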
840 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_unpin() argument
842 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_unpin()
844 trace_xen_mmu_pgd_unpin(mm, pgd); in __xen_pgd_unpin()
848 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_unpin()
856 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); in __xen_pgd_unpin()
863 __xen_pgd_unpin(mm, mm->pgd); in xen_pgd_unpin()
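Unpinning (lines 840-863) reverses the order: the root is unregistered with MMUEXT_UNPIN_TABLE first (line 848), and only then does the walk with xen_unpin_page make the pages ordinary writable memory again; the other way around would leave Xen holding a reference to pages the guest is already modifying. A sketch mirroring the pin path:

    static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
    {
        pgd_t *user_pgd = xen_get_user_pgd(pgd);

        trace_xen_mmu_pgd_unpin(mm, pgd);

        xen_mc_batch();

        /* Unregister the root first; the pages stay read-only until the
         * walk below releases them. */
        xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

        if (user_pgd) {
            xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
            xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
        }

        __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

        xen_mc_issue(0);
    }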
912 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) in drop_mm_ref_this_cpu()
931 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) in xen_drop_mm_ref()
947 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) in xen_drop_mm_ref()
984 if (xen_page_pinned(mm->pgd)) in xen_exit_mmap()
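Lines 912-984 deal with tearing down an address space: a pagetable cannot be unpinned while any vcpu still references it through cr3, so the per-cpu xen_current_cr3 copies are compared against __pa(mm->pgd) and matching CPUs are forced to drop their (possibly lazy) reference before xen_exit_mmap() unpins. A sketch of that scan, close to what the listing implies:

    static void drop_mm_ref_this_cpu(void *info)
    {
        struct mm_struct *mm = info;

        /* A lazy-mode vcpu may hold a stale cr3 reference in a pending
         * hypercall batch; flush it if this pgd is still referenced. */
        if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
            xen_mc_flush();
    }

    static void xen_drop_mm_ref(struct mm_struct *mm)
    {
        cpumask_var_t mask;
        unsigned cpu;

        drop_mm_ref_this_cpu(mm);

        if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
            /* No scratch mask available: one IPI per matching CPU. */
            for_each_online_cpu(cpu) {
                if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
                    continue;
                smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
            }
            return;
        }

        /* Collect every CPU whose cached cr3 still points at this pgd
         * and kick them all at once. */
        cpumask_clear(mask);
        for_each_online_cpu(cpu) {
            if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
                cpumask_set_cpu(cpu, mask);
        }
        smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
        free_cpumask_var(mask);
    }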
1119 pgd_t *pgd; in xen_cleanmfnmap() local
1125 pgd = pgd_offset_k(vaddr); in xen_cleanmfnmap()
1126 p4d = p4d_offset(pgd, 0); in xen_cleanmfnmap()
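xen_cleanmfnmap() (lines 1119-1126) tears down the leftover early mappings below a given address: it takes the kernel pgd entry for vaddr, steps to the p4d level, and hands off to a per-level cleanup helper. A sketch of that entry step (xen_cleanmfnmap_p4d() is an assumed helper name; the unpin condition is a guess at the real heuristic):

    static void __init xen_cleanmfnmap(unsigned long vaddr)
    {
        pgd_t *pgd;
        p4d_t *p4d;
        bool unpin;

        /* Only the initial, still-pinned pagetable needs unpin work. */
        unpin = (vaddr == 2 * PGDIR_SIZE);
        vaddr &= PMD_MASK;
        pgd = pgd_offset_k(vaddr);
        p4d = p4d_offset(pgd, 0);
        if (!p4d_none(*p4d))
            xen_cleanmfnmap_p4d(p4d, unpin);
    }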
1394 pgd_t *pgd = mm->pgd; in xen_pgd_alloc() local
1395 struct page *page = virt_to_page(pgd); in xen_pgd_alloc()
1399 BUG_ON(PagePinned(virt_to_page(pgd))); in xen_pgd_alloc()
1413 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); in xen_pgd_alloc()
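xen_pgd_alloc() (lines 1394-1413) runs when a new mm is created, before its pagetable can have been pinned, hence the BUG_ON checks on lines 1399 and 1413. On 64-bit PV it also allocates the shadow user-mode pgd and parks the pointer where xen_get_user_pgd() can find it. A sketch, again assuming page->private is the stash:

    static int xen_pgd_alloc(struct mm_struct *mm)
    {
        pgd_t *pgd = mm->pgd;
        struct page *page = virt_to_page(pgd);
        pgd_t *user_pgd;
        int ret = -ENOMEM;

        BUG_ON(PagePinned(virt_to_page(pgd)));
        BUG_ON(page->private != 0);

        /* Allocate the user-mode top level and stash it on the kernel
         * pgd's struct page for xen_get_user_pgd() to find. */
        user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        page->private = (unsigned long)user_pgd;

        if (user_pgd != NULL) {
            /* Guarded by CONFIG_X86_VSYSCALL_EMULATION in the real
             * source: pre-seed the vsyscall slot of the user table. */
            user_pgd[pgd_index(VSYSCALL_ADDR)] =
                __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
            ret = 0;
        }

        BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));

        return ret;
    }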
1418 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) in xen_pgd_free() argument
1420 pgd_t *user_pgd = xen_get_user_pgd(pgd); in xen_pgd_free()
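The matching teardown is short: the only Xen-specific resource tied to a pgd is the user-mode page allocated above. A sketch completing lines 1418-1420:

    static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
    {
        pgd_t *user_pgd = xen_get_user_pgd(pgd);

        /* Release the shadow user top level from xen_pgd_alloc(). */
        if (user_pgd)
            free_page((unsigned long)user_pgd);
    }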
1530 bool pinned = xen_page_pinned(mm->pgd); in xen_alloc_ptpage()
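Line 1530 shows the key decision in xen_alloc_ptpage(): a page freshly allocated for an already-pinned pagetable must immediately be made read-only (and, at the PTE level, pinned itself), because Xen validates every page hanging off a pinned root. A simplified sketch of that branch; the real function batches these operations as multicalls, so treat this as the shape rather than the exact flow:

    static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
                                 unsigned level)
    {
        bool pinned = xen_page_pinned(mm->pgd);

        if (pinned) {
            struct page *page = pfn_to_page(pfn);

            /* A page joining a pinned tree must be read-only before Xen
             * will accept it as part of the pagetable. */
            SetPagePinned(page);
            make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
            if (level == PT_PTE)
                pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
        }
    }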
1693 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) in xen_setup_kernel_pagetable() argument
1731 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); in xen_setup_kernel_pagetable()
1734 addr[0] = (unsigned long)pgd; in xen_setup_kernel_pagetable()
1782 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in xen_setup_kernel_pagetable()
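xen_setup_kernel_pagetable() (lines 1693-1782) migrates from the pagetable Xen built for the domain to the kernel's own top level: it reads the Xen-provided L3 through m2v() (line 1731), copies the kernel mappings across, pins the new root, and finally unpins the old one (line 1782) so its frames can be reclaimed. The m2v() step is the crux of reading a foreign-built table: pgd entries hold machine addresses, which must be converted back to kernel virtual addresses before they can be dereferenced. A sketch of that helper pair as it appears earlier in the file (the exact mask macro may differ by kernel version):

    /* Convert a machine address to a pseudo-physical address. */
    static unsigned long m2p(phys_addr_t maddr)
    {
        phys_addr_t paddr;

        maddr &= PTE_PFN_MASK;
        paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

        return paddr;
    }

    /* Convert a machine address to a kernel virtual address. */
    static void *m2v(phys_addr_t maddr)
    {
        return __va(m2p(maddr));
    }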
1839 pgd_t pgd; in xen_early_virt_to_phys() local
1845 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) * in xen_early_virt_to_phys()
1846 sizeof(pgd))); in xen_early_virt_to_phys()
1847 if (!pgd_present(pgd)) in xen_early_virt_to_phys()
1850 pa = pgd_val(pgd) & PTE_PFN_MASK; in xen_early_virt_to_phys()
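xen_early_virt_to_phys() (lines 1839-1850) resolves a virtual address at a point where the kernel's own mappings cannot be trusted, so every level is fetched via a physical-address read (xen_read_phys_ulong() on line 1845) instead of being dereferenced. The listing shows the pgd step; a sketch of the whole descent in the same pattern (pud_large()/pmd_large() per older trees; newer kernels spell these pud_leaf()/pmd_leaf()):

    static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
    {
        pgd_t pgd;
        pud_t pud;
        pmd_t pmd;
        pte_t pte;
        phys_addr_t pa;

        pa = read_cr3_pa();
        pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
                                                  sizeof(pgd)));
        if (!pgd_present(pgd))
            return 0;

        pa = pgd_val(pgd) & PTE_PFN_MASK;
        pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
                                                  sizeof(pud)));
        if (!pud_present(pud))
            return 0;
        pa = pud_val(pud) & PTE_PFN_MASK;
        if (pud_large(pud))
            return pa + (vaddr & ~PUD_MASK);

        pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
                                                  sizeof(pmd)));
        if (!pmd_present(pmd))
            return 0;
        pa = pmd_val(pmd) & PTE_PFN_MASK;
        if (pmd_large(pmd))
            return pa + (vaddr & ~PMD_MASK);

        pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
                                                  sizeof(pte)));
        if (!pte_present(pte))
            return 0;
        pa = pte_val(pte) & PTE_PFN_MASK;

        return pa | (vaddr & ~PAGE_MASK);
    }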
1888 pgd_t *pgd; in xen_relocate_p2m() local
1917 pgd = __va(read_cr3_pa()); in xen_relocate_p2m()
1957 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); in xen_relocate_p2m()
1974 set_pgd(pgd + 1, __pgd(0)); in xen_relocate_p2m()
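xen_relocate_p2m() (lines 1888-1974) moves the physical-to-machine mapping list to its final location: it reads the live pgd from cr3 (line 1917), builds fresh pud/pmd/pte pages for the new area, hooks each pud in with set_pgd() (line 1957), and clears the now-stale slot afterwards (line 1974). A minimal sketch of the hook-in step as a hypothetical helper (xen_install_p2m_pud() is not a real function; the statements inside match the pattern the listing shows):

    static void __init xen_install_p2m_pud(pgd_t *pgd, unsigned long idx_pud,
                                           phys_addr_t pud_phys)
    {
        /* The page must be read-only and pinned as an L3 table before
         * Xen will accept it being linked under a live root. */
        make_lowmem_page_readonly(__va(pud_phys));
        pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));

        /* The new p2m virtual area starts at the third top-level slot in
         * this layout, hence the "2 +" offset. */
        set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
    }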