/linux/arch/m68k/include/asm/bitops.h
    59  #define set_bit(nr, vaddr)             bset_reg_set_bit(nr, vaddr)     argument
    61  #define set_bit(nr, vaddr)             bset_mem_set_bit(nr, vaddr)     argument
    68  #define __set_bit(nr, vaddr)           set_bit(nr, vaddr)              argument
    99  #define clear_bit(nr, vaddr)           bclr_reg_clear_bit(nr, vaddr)   argument
   101  #define clear_bit(nr, vaddr)           bclr_mem_clear_bit(nr, vaddr)   argument
   108  #define __clear_bit(nr, vaddr)         clear_bit(nr, vaddr)            argument
   139  #define change_bit(nr, vaddr)          bchg_reg_change_bit(nr, vaddr)  argument
   141  #define change_bit(nr, vaddr)          bchg_mem_change_bit(nr, vaddr)  argument
   148  #define __change_bit(nr, vaddr)        change_bit(nr, vaddr)           argument
   204  #define __test_and_set_bit(nr, vaddr)  test_and_set_bit(nr, vaddr)     argument
   [all …]
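Each macro resolves to either a bset/bclr/bchg form that takes the bitmap base in a register or one that addresses memory directly, depending on the configured CPU. As a rough portable sketch of the semantics only (this is not the m68k implementation; sketch_set_bit is an invented name), an atomic set_bit over an unsigned-long bitmap looks like:

    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Minimal sketch: atomically set bit nr in the bitmap at vaddr.
     * Like the kernel's set_bit(), it is atomic but implies no
     * memory-ordering guarantees, hence the relaxed builtin. */
    static inline void sketch_set_bit(int nr, volatile unsigned long *vaddr)
    {
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);

        __atomic_fetch_or(vaddr + nr / BITS_PER_LONG, mask, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        unsigned long bitmap[2] = { 0, 0 };

        sketch_set_bit(3, bitmap);                  /* word 0, bit 3 */
        sketch_set_bit(BITS_PER_LONG + 1, bitmap);  /* word 1, bit 1 */
        return (bitmap[0] == 8 && bitmap[1] == 2) ? 0 : 1;
    }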
/linux/arch/parisc/kernel/pci-dma.c
    83  vaddr &= ~PMD_MASK;                                     in map_pte_uncached()
    84  end = vaddr + size;                                     in map_pte_uncached()
   120  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;                  in map_pmd_uncached()
   145  if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))  in map_uncached_pages()
   147  vaddr = vaddr + PGDIR_SIZE;                             in map_uncached_pages()
   149  } while (vaddr && (vaddr < end));                       in map_uncached_pages()
   203  pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);  in unmap_uncached_pmd()
   210  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;                  in unmap_uncached_pmd()
   223  unmap_uncached_pmd(dir, vaddr, end - vaddr);            in unmap_uncached_pages()
   224  vaddr = vaddr + PGDIR_SIZE;                             in unmap_uncached_pages()
   [all …]
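The `(vaddr + PMD_SIZE) & PMD_MASK` step at lines 120 and 210 is the usual idiom for advancing a cursor to the next PMD-aligned boundary. A standalone sketch of the arithmetic (the shift value is illustrative, not parisc's actual one):

    #include <stdio.h>

    #define PMD_SHIFT 21                    /* illustrative */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long vaddr = 0x40123456UL;

        /* Add one PMD, then mask: lands on the start of the next
         * PMD-sized region, even when vaddr is already aligned. */
        vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
        printf("%#lx\n", vaddr);            /* prints 0x40200000 */
        return 0;
    }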
/linux/arch/arm/mm/cache-xsc3l2.c
    88  unsigned long vaddr;                                       in xsc3_l2_inv_range() local
   101  vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);  in xsc3_l2_inv_range()
   111  vaddr = l2_map_va(start, vaddr);                           in xsc3_l2_inv_range()
   120  vaddr = l2_map_va(start, vaddr);                           in xsc3_l2_inv_range()
   125  l2_unmap_va(vaddr);                                        in xsc3_l2_inv_range()
   132  unsigned long vaddr;                                       in xsc3_l2_clean_range() local
   138  vaddr = l2_map_va(start, vaddr);                           in xsc3_l2_clean_range()
   143  l2_unmap_va(vaddr);                                        in xsc3_l2_clean_range()
   170  unsigned long vaddr;                                       in xsc3_l2_flush_range() local
   181  vaddr = l2_map_va(start, vaddr);                           in xsc3_l2_flush_range()
   [all …]
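Line 101 rounds `start` down with `start & ~(CACHE_LINE_SIZE - 1)` so maintenance begins on a line boundary. A sketch of the resulting line walk (the 32-byte line size is assumed here, not taken from the file):

    #include <stdio.h>

    #define CACHE_LINE_SIZE 32UL            /* assumed for the demo */

    int main(void)
    {
        unsigned long start = 0x1010, end = 0x10a4;

        /* Visit every cache line the byte range [start, end) touches. */
        for (unsigned long line = start & ~(CACHE_LINE_SIZE - 1);
             line < end; line += CACHE_LINE_SIZE)
            printf("line %#lx\n", line);
        return 0;
    }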
/linux/arch/riscv/mm/kasan_init.c
    64  ptep = base_pte + pte_index(vaddr);                  in kasan_populate_pte()
    71  } while (ptep++, vaddr += PAGE_SIZE, vaddr != end);  in kasan_populate_pte()
    89  next = pmd_addr_end(vaddr, end);                     in kasan_populate_pmd()
    91  if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {  in kasan_populate_pmd()
   100  } while (pmdp++, vaddr = next, vaddr != end);        in kasan_populate_pmd()
   118  next = pgd_addr_end(vaddr, end);                     in kasan_populate_pgd()
   126  IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {  in kasan_populate_pgd()
   135  } while (pgdp++, vaddr = next, vaddr != end);        in kasan_populate_pgd()
   143  kasan_populate_pgd(vaddr, vend);                     in kasan_populate()
   156  next = pgd_addr_end(vaddr, end);                     in kasan_shallow_populate_pgd()
   [all …]
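The test at line 91 is the standard gate for using a block mapping: the cursor must sit on a PMD boundary and at least one whole PMD must remain before `next`. A self-contained sketch (shift value illustrative; can_map_pmd is an invented name):

    #include <stdbool.h>
    #include <stdio.h>

    #define PMD_SHIFT 21                    /* illustrative */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* Mirror of the eligibility check in kasan_populate_pmd(). */
    static bool can_map_pmd(unsigned long vaddr, unsigned long next)
    {
        return IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE;
    }

    int main(void)
    {
        printf("%d\n", can_map_pmd(0x40200000UL, 0x40400000UL));  /* 1 */
        printf("%d\n", can_map_pmd(0x40201000UL, 0x40400000UL));  /* 0 */
        return 0;
    }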
/linux/arch/parisc/mm/fixmap.c
    15  unsigned long vaddr = __fix_to_virt(idx);          in set_fixmap() local
    16  pgd_t *pgd = pgd_offset_k(vaddr);                  in set_fixmap()
    17  p4d_t *p4d = p4d_offset(pgd, vaddr);               in set_fixmap()
    18  pud_t *pud = pud_offset(p4d, vaddr);               in set_fixmap()
    19  pmd_t *pmd = pmd_offset(pud, vaddr);               in set_fixmap()
    23  pte = pte_alloc_kernel(pmd, vaddr);                in set_fixmap()
    25  pte = pte_offset_kernel(pmd, vaddr);               in set_fixmap()
    27  flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);  in set_fixmap()
    33  pte_t *pte = virt_to_kpte(vaddr);                  in clear_fixmap()
    38  pte_clear(&init_mm, vaddr, pte);                   in clear_fixmap()
   [all …]
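Lines 16-19 are the canonical descent through the page-table levels, each step indexing the next table with a different slice of vaddr's bits. A toy two-level version of that indexing (field widths invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_SHIFT 12                    /* 4 KiB pages */
    #define PTE_BITS  9                     /* 512 entries per table */
    #define PGD_SHIFT (PTE_SHIFT + PTE_BITS)

    /* Upper bits pick the directory slot, middle bits the table slot. */
    static unsigned pgd_index(uintptr_t va) { return va >> PGD_SHIFT; }
    static unsigned pte_index(uintptr_t va)
    {
        return (va >> PTE_SHIFT) & ((1u << PTE_BITS) - 1);
    }

    int main(void)
    {
        uintptr_t va = 0x40201000;
        printf("pgd slot %#x, pte slot %#x\n", pgd_index(va), pte_index(va));
        return 0;
    }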
/linux/arch/x86/mm/mem_encrypt.c
   134  vaddr += PMD_SIZE;                               in __sme_early_map_unmap_mem()
   265  unsigned long vaddr_end = vaddr + sz;            in notify_range_enc_status_changed()
   267  while (vaddr < vaddr_end) {                      in notify_range_enc_status_changed()
   272  kpte = lookup_address(vaddr, &level);            in notify_range_enc_status_changed()
   287  vaddr = (vaddr & pmask) + psize;                 in notify_range_enc_status_changed()
   342  start = vaddr;                                   in early_set_memory_enc_dec()
   343  vaddr_next = vaddr;                              in early_set_memory_enc_dec()
   344  vaddr_end = vaddr + size;                        in early_set_memory_enc_dec()
   346  for (; vaddr < vaddr_end; vaddr = vaddr_next) {  in early_set_memory_enc_dec()
   347  kpte = lookup_address(vaddr, &level);            in early_set_memory_enc_dec()
   [all …]
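Line 287 advances past whatever mapping covers vaddr, regardless of its size: mask down to the start of the current page, then add that page's size. A sketch with an assumed 2 MiB mapping:

    #include <stdio.h>

    int main(void)
    {
        unsigned long vaddr = 0x40123456UL;
        unsigned long psize = 1UL << 21;    /* pretend lookup_address()
                                               reported a 2 MiB page */
        unsigned long pmask = ~(psize - 1);

        /* First address after the mapping that contains vaddr. */
        vaddr = (vaddr & pmask) + psize;
        printf("%#lx\n", vaddr);            /* prints 0x40200000 */
        return 0;
    }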
/linux/arch/x86/mm/init_32.c
   135  unsigned long vaddr;                 in page_table_range_init_count() local
   140  vaddr = start;                       in page_table_range_init_count()
   141  pgd_idx = pgd_index(vaddr);          in page_table_range_init_count()
   142  pmd_idx = pmd_index(vaddr);          in page_table_range_init_count()
   150  vaddr += PMD_SIZE;                   in page_table_range_init_count()
   212  unsigned long vaddr;                 in page_table_range_init() local
   222  vaddr = start;                       in page_table_range_init()
   223  pgd_idx = pgd_index(vaddr);          in page_table_range_init()
   235  vaddr += PMD_SIZE;                   in page_table_range_init()
   398  page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);  in permanent_kmaps_init()
   [all …]
/linux/mm/highmem.c
   206  unsigned long vaddr;                  in map_new_virtual() local
   255  return vaddr;                         in map_new_virtual()
   268  unsigned long vaddr;                  in kmap_high() local
   276  if (!vaddr)                           in kmap_high()
   302  if (vaddr) {                          in kmap_high_get()
   320  unsigned long vaddr;                  in kunmap_high() local
   329  BUG_ON(!vaddr);                       in kunmap_high()
   330  nr = PKMAP_NR(vaddr);                 in kunmap_high()
   489  if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {  in kmap_high_unmap_local()
   520  unsigned long vaddr;                  in __kmap_local_pfn_prot() local
   [all …]
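PKMAP_NR() at line 330 and PKMAP_ADDR() at line 489 convert between a pkmap slot number and its virtual address; each slot is one page above PKMAP_BASE. A sketch of the pair (the base address is invented; the real one is per-architecture):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PKMAP_BASE 0xff800000UL         /* invented for the demo */
    #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))
    #define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long vaddr = PKMAP_ADDR(17);

        assert(PKMAP_NR(vaddr) == 17);      /* the pair round-trips */
        printf("slot 17 -> %#lx\n", vaddr);
        return 0;
    }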
/linux/arch/arm/mach-ixp4xx/include/mach/io.h
    97  const u8 *vaddr = p;    in __indirect_writesb() local
   122  const u16 *vaddr = p;   in __indirect_writesw() local
   143  const u32 *vaddr = p;   in __indirect_writesl() local
   167  u8 *vaddr = p;          in __indirect_readsb() local
   192  u16 *vaddr = p;         in __indirect_readsw() local
   215  u32 *vaddr = p;         in __indirect_readsl() local
   258  const u8 *vaddr = p;    in outsb() local
   277  const u16 *vaddr = p;   in outsw() local
   311  u8 *vaddr = p;          in insb() local
   331  u16 *vaddr = p;         in insw() local
   [all …]
/linux/drivers/media/common/videobuf2/videobuf2-vmalloc.c
    26  void *vaddr;                           member
    48  if (!buf->vaddr) {                     in vb2_vmalloc_alloc()
    68  vfree(buf->vaddr);                     in vb2_vmalloc_put()
   111  if (!buf->vaddr)                       in vb2_vmalloc_get_userptr()
   127  unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;  in vb2_vmalloc_put_userptr() local
   135  if (vaddr)                             in vb2_vmalloc_put_userptr()
   152  if (!buf->vaddr) {                     in vb2_vmalloc_vaddr()
   157  return buf->vaddr;                     in vb2_vmalloc_vaddr()
   216  void *vaddr = buf->vaddr;              in vb2_vmalloc_dmabuf_ops_attach() local
   381  buf->vaddr = map.vaddr;                in vb2_vmalloc_map_dmabuf()
   [all …]
/linux/arch/m68k/sun3x/dvma.c
    79  unsigned long vaddr, int len)      in dvma_map_cpu() argument
    88  vaddr &= PAGE_MASK;                in dvma_map_cpu()
    90  end = PAGE_ALIGN(vaddr + len);     in dvma_map_cpu()
    93  pgd = pgd_offset_k(vaddr);         in dvma_map_cpu()
    94  p4d = p4d_offset(pgd, vaddr);      in dvma_map_cpu()
    95  pud = pud_offset(p4d, vaddr);      in dvma_map_cpu()
   127  __pa(kaddr), vaddr);               in dvma_map_cpu()
   132  vaddr += PAGE_SIZE;                in dvma_map_cpu()
   133  } while(vaddr < end3);             in dvma_map_cpu()
   135  } while(vaddr < end2);             in dvma_map_cpu()
   [all …]
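Lines 88 and 90 together expand the byte range [vaddr, vaddr + len) to whole pages: the end is rounded up first, then the start is rounded down. A standalone sketch of the arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long vaddr = 0x2345, len = 0x1800;
        unsigned long end = PAGE_ALIGN(vaddr + len);  /* round end up first */

        vaddr &= PAGE_MASK;                           /* then start down */
        printf("pages [%#lx, %#lx)\n", vaddr, end);   /* [0x2000, 0x4000) */
        return 0;
    }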
/linux/arch/sparc/mm/io-unit.c
   135  vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);  in iounit_get_area()
   140  IOD(("%08lx\n", vaddr));                          in iounit_get_area()
   141  return vaddr;                                     in iounit_get_area()
   188  vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;  in iounit_unmap_page()
   189  IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));  in iounit_unmap_page()
   190  for (len += vaddr; vaddr < len; vaddr++)          in iounit_unmap_page()
   191  clear_bit(vaddr, iounit->bmap);                   in iounit_unmap_page()
   199  unsigned long flags, vaddr, len;                  in iounit_unmap_sg() local
   207  IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));  in iounit_unmap_sg()
   208  for (len += vaddr; vaddr < len; vaddr++)          in iounit_unmap_sg()
   [all …]
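Lines 135 and 188 are inverses: allocation turns a bitmap scan index into a DMA address (keeping the sub-page offset of the CPU address), and release turns the address back into the slot whose bits get cleared. A sketch with an assumed base and invented helper names:

    #include <assert.h>

    #define PAGE_SHIFT      12
    #define PAGE_MASK       (~((1UL << PAGE_SHIFT) - 1))
    #define IOUNIT_DMA_BASE 0xfc000000UL    /* assumed for the demo */

    /* Bitmap slot -> DMA address, preserving the in-page offset. */
    static unsigned long slot_to_dma(unsigned long scan, unsigned long cpu_va)
    {
        return IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (cpu_va & ~PAGE_MASK);
    }

    /* DMA address -> bitmap slot, as on the release path. */
    static unsigned long dma_to_slot(unsigned long dma_va)
    {
        return (dma_va - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
    }

    int main(void)
    {
        assert(dma_to_slot(slot_to_dma(5, 0x12345)) == 5);
        return 0;
    }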
/linux/drivers/net/ethernet/freescale/fman/fman_muram.c
    47  unsigned long vaddr)                         in fman_muram_vbase_to_offset() argument
    49  return vaddr - (unsigned long)muram->vbase;  in fman_muram_vbase_to_offset()
    68  void __iomem *vaddr;                         in fman_muram_init() local
    81  vaddr = ioremap(base, size);                 in fman_muram_init()
    82  if (!vaddr) {                                in fman_muram_init()
    91  iounmap(vaddr);                              in fman_muram_init()
    95  memset_io(vaddr, 0, (int)size);              in fman_muram_init()
    97  muram->vbase = vaddr;                        in fman_muram_init()
   134  unsigned long vaddr;                         in fman_muram_alloc() local
   136  vaddr = gen_pool_alloc(muram->pool, size);   in fman_muram_alloc()
   [all …]
/linux/arch/nds32/mm/cacheflush.c
   206  if (aliasing(vaddr, (unsigned long)kfrom))  in copy_user_highpage()
   208  vto = kremap0(vaddr, pto);                  in copy_user_highpage()
   209  vfrom = kremap1(vaddr, pfrom);              in copy_user_highpage()
   225  if (aliasing(kto, vaddr) && kto != 0) {     in clear_user_highpage()
   229  vto = kremap0(vaddr, page_to_phys(page));   in clear_user_highpage()
   251  unsigned long vaddr, kto;                   in flush_dcache_page() local
   253  vaddr = page->index << PAGE_SHIFT;          in flush_dcache_page()
   254  if (aliasing(vaddr, kaddr)) {               in flush_dcache_page()
   271  vto = kremap0(vaddr, page_to_phys(page));   in copy_to_user_page()
   292  vto = kremap0(vaddr, page_to_phys(page));   in copy_from_user_page()
   [all …]
/linux/arch/m68k/sun3/dvma.c
    23  static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)  in dvma_page() argument
    35  if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {  in dvma_page()
    36  sun3_put_pte(vaddr, pte);                              in dvma_page()
    37  ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;        in dvma_page()
    40  return (vaddr + (kaddr & ~PAGE_MASK));                 in dvma_page()
    49  unsigned long vaddr;                                   in dvma_map_iommu() local
    51  vaddr = dvma_btov(baddr);                              in dvma_map_iommu()
    53  end = vaddr + len;                                     in dvma_map_iommu()
    55  while(vaddr < end) {                                   in dvma_map_iommu()
    56  dvma_page(kaddr, vaddr);                               in dvma_map_iommu()
   [all …]
/linux/arch/m68k/sun3/mmu_emu.c
   117  void print_pte_vaddr (unsigned long vaddr)  in print_pte_vaddr() argument
   119  pr_cont(" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));  in print_pte_vaddr()
   120  print_pte (__pte (sun3_get_pte (vaddr)));   in print_pte_vaddr()
   287  vaddr &= ~SUN3_PMEG_MASK;                   in mmu_emu_map_pmeg()
   296  curr_pmeg, context, vaddr);                 in mmu_emu_map_pmeg()
   308  if(vaddr >= PAGE_OFFSET) {                  in mmu_emu_map_pmeg()
   314  sun3_put_segmap (vaddr, curr_pmeg);         in mmu_emu_map_pmeg()
   324  sun3_put_segmap (vaddr, curr_pmeg);         in mmu_emu_map_pmeg()
   327  pmeg_vaddr[curr_pmeg] = vaddr;              in mmu_emu_map_pmeg()
   400  mmu_emu_map_pmeg (context, vaddr);          in mmu_emu_handle_fault()
   [all …]
/linux/arch/sh/mm/kmap.c
    21  unsigned long vaddr;                                  in kmap_coherent_init() local
    24  vaddr = __fix_to_virt(FIX_CMAP_BEGIN);                in kmap_coherent_init()
    25  kmap_coherent_pte = virt_to_kpte(vaddr);              in kmap_coherent_init()
    31  unsigned long vaddr;                                  in kmap_coherent() local
    42  vaddr = __fix_to_virt(idx);                           in kmap_coherent()
    47  return (void *)vaddr;                                 in kmap_coherent()
    53  unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;  in kunmap_coherent() local
    54  enum fixed_addresses idx = __virt_to_fix(vaddr);      in kunmap_coherent()
    57  __flush_purge_region((void *)vaddr, PAGE_SIZE);       in kunmap_coherent()
    59  pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);  in kunmap_coherent()
   [all …]
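__fix_to_virt() at line 42 and __virt_to_fix() at line 54 are the generic fixmap pair: slot numbers count downward from the top of the fixmap area, one page per slot. A sketch of the two conversions (FIXADDR_TOP is invented here):

    #include <assert.h>

    #define PAGE_SHIFT  12
    #define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))
    #define FIXADDR_TOP 0xfffff000UL        /* invented for the demo */

    /* Slots grow downward from FIXADDR_TOP, one page each. */
    #define __fix_to_virt(x) (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))
    #define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long vaddr = __fix_to_virt(3);

        assert(vaddr == 0xffffc000UL);
        assert(__virt_to_fix(vaddr) == 3);  /* the pair round-trips */
        return 0;
    }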
/linux/arch/um/kernel/mem.c
   101  unsigned long vaddr;                          in fixrange_init() local
   103  vaddr = start;                                in fixrange_init()
   104  i = pgd_index(vaddr);                         in fixrange_init()
   105  j = pmd_index(vaddr);                         in fixrange_init()
   109  p4d = p4d_offset(pgd, vaddr);                 in fixrange_init()
   110  pud = pud_offset(p4d, vaddr);                 in fixrange_init()
   113  pmd = pmd_offset(pud, vaddr);                 in fixrange_init()
   116  vaddr += PMD_SIZE;                            in fixrange_init()
   128  unsigned long v, vaddr = FIXADDR_USER_START;  in fixaddr_user_init() local
   143  pte = virt_to_kpte(vaddr);                    in fixaddr_user_init()
   [all …]
/linux/drivers/misc/sgi-gru/grufault.c
    73  vma = gru_find_vma(vaddr);             in gru_find_lock_gts()
    90  vma = gru_find_vma(vaddr);             in gru_alloc_locked_gts()
   260  vma = find_vma(mm, vaddr);             in gru_vtop()
   316  unsigned long vaddr = 0, gpa;          in gru_preload_tlb() local
   328  vaddr &= PAGE_MASK;                    in gru_preload_tlb()
   329  vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);  in gru_preload_tlb()
   331  while (vaddr > fault_vaddr) {          in gru_preload_tlb()
   340  vaddr -= PAGE_SIZE;                    in gru_preload_tlb()
   398  vaddr = tfh->missvaddr;                in gru_try_dropin()
   509  tfh, vaddr);                           in gru_try_dropin()
   [all …]
/linux/arch/mips/mm/pgtable-32.c
    53  unsigned long vaddr;                                  in pagetable_init() local
    73  vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);  in pagetable_init()
    74  fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);  in pagetable_init()
    80  vaddr = PKMAP_BASE;                                   in pagetable_init()
    81  fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);  in pagetable_init()
    83  pgd = swapper_pg_dir + pgd_index(vaddr);              in pagetable_init()
    84  p4d = p4d_offset(pgd, vaddr);                         in pagetable_init()
    85  pud = pud_offset(p4d, vaddr);                         in pagetable_init()
    86  pmd = pmd_offset(pud, vaddr);                         in pagetable_init()
    87  pte = pte_offset_kernel(pmd, vaddr);                  in pagetable_init()
/linux/arch/sparc/include/asm/viking.h
   218  vaddr &= PAGE_MASK;                               in viking_hwprobe()
   222  : "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));  in viking_hwprobe()
   229  : "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));  in viking_hwprobe()
   231  vaddr &= ~PGDIR_MASK;                             in viking_hwprobe()
   232  vaddr >>= PAGE_SHIFT;                             in viking_hwprobe()
   233  return val | (vaddr << 8);                        in viking_hwprobe()
   239  : "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));  in viking_hwprobe()
   241  vaddr &= ~PMD_MASK;                               in viking_hwprobe()
   242  vaddr >>= PAGE_SHIFT;                             in viking_hwprobe()
   243  return val | (vaddr << 8);                        in viking_hwprobe()
   [all …]
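When the probe hits a large (segment- or region-level) entry, lines 241-243 fold the page's index within that mapping into the returned value: keep only the offset bits, convert them to a page count, and shift into the PPN field, which on srmmu starts at bit 8. A sketch with illustrative sizes and an invented probe result:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  18                   /* illustrative segment size */
    #define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

    int main(void)
    {
        unsigned long vaddr = 0x40066000UL;
        unsigned long val   = 0x12300000UL; /* pretend segment-level PTE */

        vaddr &= ~PMD_MASK;                 /* offset within the segment */
        vaddr >>= PAGE_SHIFT;               /* ... as a page index */
        printf("%#lx\n", val | (vaddr << 8));   /* prints 0x12302600 */
        return 0;
    }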
/linux/arch/xtensa/mm/cache.c
    60  unsigned long vaddr)                                   in kmap_invalidate_coherent() argument
    62  if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {     in kmap_invalidate_coherent()
    82  unsigned long vaddr, unsigned long *paddr)             in coherent_kvaddr() argument
    85  return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));   in coherent_kvaddr()
    94  kmap_invalidate_coherent(page, vaddr);                 in clear_user_highpage()
   102  unsigned long vaddr, struct vm_area_struct *vma)       in copy_user_highpage() argument
   111  kmap_invalidate_coherent(dst, vaddr);                  in copy_user_highpage()
   261  unsigned long vaddr, void *dst, const void *src,       in copy_to_user_page() argument
   265  unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));  in copy_to_user_page()
   301  unsigned long vaddr, void *dst, const void *src,       in copy_from_user_page() argument
   [all …]
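DCACHE_ALIAS_EQ() asks whether two addresses land on the same color of a virtually indexed cache, i.e. whether their index bits above the page offset agree. A sketch of the predicate (the way size is assumed, not xtensa's actual configuration):

    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define DCACHE_WAY_SIZE   (1UL << 14)   /* assumed: 16 KiB per way */
    #define DCACHE_ALIAS_MASK ((DCACHE_WAY_SIZE - 1) & ~((1UL << PAGE_SHIFT) - 1))

    /* Same cache color iff the alias bits match. */
    #define DCACHE_ALIAS_EQ(a, b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)

    int main(void)
    {
        printf("%d\n", DCACHE_ALIAS_EQ(0x10001000UL, 0x20003000UL)); /* 0 */
        printf("%d\n", DCACHE_ALIAS_EQ(0x10001000UL, 0x20001000UL)); /* 1 */
        return 0;
    }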
/linux/include/linux/dma-buf-map.h
   118  void *vaddr;                           member
   129  .vaddr = (vaddr_), \
   140  static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)  in dma_buf_map_set_vaddr() argument
   142  map->vaddr = vaddr;                    in dma_buf_map_set_vaddr()
   179  return lhs->vaddr == rhs->vaddr;       in dma_buf_map_is_equal()
   196  return !map->vaddr;                    in dma_buf_map_is_null()
   228  map->vaddr = NULL;                     in dma_buf_map_clear()
   247  memcpy(dst->vaddr, src, len);          in dma_buf_map_memcpy_to()
   263  map->vaddr += incr;                    in dma_buf_map_incr()
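The accessors indexed above make up most of the dma_buf_map surface. A stand-alone sketch of how they compose, using only the system-memory half of the type (the real struct also carries an is_iomem flag and I/O-memory paths that this demo omits):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct dma_buf_map {
        void *vaddr;
        bool is_iomem;
    };

    static void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
    {
        map->vaddr = vaddr;
        map->is_iomem = false;
    }

    static bool dma_buf_map_is_null(const struct dma_buf_map *map)
    {
        return !map->vaddr;
    }

    /* Sysmem-only: the kernel version picks memcpy_toio() for iomem. */
    static void dma_buf_map_memcpy_to(struct dma_buf_map *dst,
                                      const void *src, size_t len)
    {
        memcpy(dst->vaddr, src, len);
    }

    int main(void)
    {
        char backing[16];
        struct dma_buf_map map = { 0 };

        assert(dma_buf_map_is_null(&map));
        dma_buf_map_set_vaddr(&map, backing);
        dma_buf_map_memcpy_to(&map, "hello", 6);
        assert(!dma_buf_map_is_null(&map));
        return 0;
    }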
/linux/arch/s390/kvm/gaccess.c
   641  if (vaddr.rfx)                                       in guest_translate()
   648  if (vaddr.rfx || vaddr.rsx)                          in guest_translate()
   655  if (vaddr.rfx || vaddr.rsx || vaddr.rtx)             in guest_translate()
   674  if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)  in guest_translate()
   692  if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)  in guest_translate()
  1023  if (vaddr.rfx)                                       in kvm_s390_shadow_tables()
  1029  if (vaddr.rfx || vaddr.rsx)                          in kvm_s390_shadow_tables()
  1035  if (vaddr.rfx || vaddr.rsx || vaddr.rtx)             in kvm_s390_shadow_tables()
  1059  if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)  in kvm_s390_shadow_tables()
  1086  if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)  in kvm_s390_shadow_tables()
   [all …]
/linux/arch/csky/mm/tcm.c
    29  unsigned long vaddr, paddr;                            local
    42  vaddr = __fix_to_virt(FIX_TCM - i);
    45  pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
    49  flush_tlb_one(vaddr);
    61  vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
    64  pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
    68  flush_tlb_one(vaddr);
   111  unsigned long vaddr;                                   local
   116  vaddr = gen_pool_alloc(tcm_pool, len);
   117  if (!vaddr)
   [all …]