/linux/arch/microblaze/mm/pgtable.c
   62  p = addr & PAGE_MASK;  in __ioremap()
  119  return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));  in __ioremap()
  132  vfree((void *) (PAGE_MASK & (unsigned long) addr));  in iounmap()
  204  pgd = pgd_offset(mm, addr & PAGE_MASK);  in get_pteptr()
  206  p4d = p4d_offset(pgd, addr & PAGE_MASK);  in get_pteptr()
  207  pud = pud_offset(p4d, addr & PAGE_MASK);  in get_pteptr()
  208  pmd = pmd_offset(pud, addr & PAGE_MASK);  in get_pteptr()
  210  pte = pte_offset_kernel(pmd, addr & PAGE_MASK);  in get_pteptr()
  240  pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);  in iopa()
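
The iopa() line shows the split-and-recombine idiom that recurs throughout this list: the page-frame bits come from the PTE, the byte offset within the page from the virtual address. A minimal standalone sketch, assuming 4 KiB pages; translate() is a hypothetical helper, not a kernel API:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* frame-number bits from the PTE value, byte offset from the address */
    static uintptr_t translate(uintptr_t pte_val, uintptr_t vaddr)
    {
            return (pte_val & PAGE_MASK) | (vaddr & ~PAGE_MASK);
    }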

/linux/arch/m68k/kernel/sys_m68k.c
   63  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;  \
  104  paddr += addr & ~(PAGE_MASK | 15);  in cache_flush_040()
  107  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);  in cache_flush_040()
  125  i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;  in cache_flush_040()
  180  len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);  in cache_flush_040()
  268  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);  in cache_flush_060()
  286  i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;  in cache_flush_060()
  319  addr &= PAGE_MASK;  in cache_flush_060()
  341  len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);  in cache_flush_060()
  342  addr &= PAGE_MASK; /* Workaround for bug in some  in cache_flush_060()
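
Lines 180 and 341 show how cache_flush_040()/cache_flush_060() turn an arbitrary (addr, len) range into whole pages: widen len by the leading offset, round up, then align addr down. A sketch using the local PAGE_* macros from the first block; range_to_pages() is a hypothetical name:

    /* aligns *addr down to its page and returns how many pages cover
     * the original [*addr, *addr + len) range */
    static unsigned long range_to_pages(unsigned long *addr, unsigned long len)
    {
            len += (*addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
            *addr &= PAGE_MASK;
            return len >> PAGE_SHIFT;
    }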

/linux/arch/arc/include/asm/pgtable-levels.h
  111  #define p4d_bad(x) ((p4d_val(x) & ~PAGE_MASK))
  114  #define p4d_pgtable(p4d) ((pud_t *)(p4d_val(p4d) & PAGE_MASK))
  133  #define pud_bad(x) ((pud_val(x) & ~PAGE_MASK))
  136  #define pud_pgtable(pud) ((pmd_t *)(pud_val(pud) & PAGE_MASK))
  160  #define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
  163  #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
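
In these ARC macros a page-table entry at each level is simply a page-aligned pointer to the next-level table, so any set bit below PAGE_SHIFT marks the entry as bad. A hedged mirror of the pattern with a made-up entry_t type:

    typedef struct { unsigned long val; } entry_t;

    #define entry_bad(e)    ((e).val & ~PAGE_MASK)           /* nonzero => corrupt    */
    #define entry_table(e)  ((void *)((e).val & PAGE_MASK))  /* next-level table base */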

/linux/arch/sh/mm/tlbflush_32.c
   25  page &= PAGE_MASK;  in local_flush_tlb_page()
   60  start &= PAGE_MASK;  in local_flush_tlb_range()
   62  end &= PAGE_MASK;  in local_flush_tlb_range()
   93  start &= PAGE_MASK;  in local_flush_tlb_kernel_range()
   95  end &= PAGE_MASK;  in local_flush_tlb_kernel_range()
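
The range-flush paths clip both ends of the span to page boundaries before invalidating one entry at a time. A sketch of that loop, where flush_one_entry() stands in for the machine-specific invalidate (hypothetical name):

    extern void flush_one_entry(unsigned long va);  /* hypothetical */

    static void flush_range(unsigned long start, unsigned long end)
    {
            start &= PAGE_MASK;                        /* first page in the span */
            end = (end + PAGE_SIZE - 1) & PAGE_MASK;   /* first page past it     */
            for (; start < end; start += PAGE_SIZE)
                    flush_one_entry(start);
    }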

/linux/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
   63  u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);  in fbmem_peek()
   64  u32 val = ioread32(p + (off & ~PAGE_MASK));  in fbmem_peek()
   72  u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);  in fbmem_poke()
   73  iowrite32(val, p + (off & ~PAGE_MASK));  in fbmem_poke()
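
An io_mapping hands out one page at a time, so fbmem_peek()/fbmem_poke() map the page containing off and index into it with the remainder. A kernel-style sketch (not standalone) of the read side, pairing the map with its unmap:

    static u32 peek32(struct io_mapping *fb, u32 off)
    {
            u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
            u32 val = ioread32(p + (off & ~PAGE_MASK));

            io_mapping_unmap_atomic(p);
            return val;
    }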

/linux/arch/sparc/mm/iommu.c
  164  start &= PAGE_MASK;  in iommu_flush_iotlb()
  188  unsigned long off = paddr & ~PAGE_MASK;  in __sbus_iommu_map_page()
  208  for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)  in __sbus_iommu_map_page()
  283  unsigned int busa = dma_addr & PAGE_MASK;  in sbus_iommu_unmap_page()
  284  unsigned long off = dma_addr & ~PAGE_MASK;  in sbus_iommu_unmap_page()
  334  BUG_ON((va & ~PAGE_MASK) != 0);  in sbus_iommu_alloc()
  335  BUG_ON((addr & ~PAGE_MASK) != 0);  in sbus_iommu_alloc()
  336  BUG_ON((len & ~PAGE_MASK) != 0);  in sbus_iommu_alloc()
  406  BUG_ON((busa & ~PAGE_MASK) != 0);  in sbus_iommu_free()
  407  BUG_ON((len & ~PAGE_MASK) != 0);  in sbus_iommu_free()
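
__sbus_iommu_map_page() maps whole pages but must return a bus address that keeps the caller's byte offset, while sbus_iommu_alloc()/sbus_iommu_free() insist on fully aligned inputs via BUG_ON. A sketch of the offset-preserving half (iommu_map_pages() is a hypothetical stand-in):

    extern unsigned long iommu_map_pages(unsigned long paddr, unsigned long len);

    static unsigned long map_for_dma(unsigned long paddr, unsigned long len)
    {
            unsigned long off = paddr & ~PAGE_MASK;  /* intra-page offset */
            unsigned long busa = iommu_map_pages(paddr & PAGE_MASK, off + len);

            return busa | off;  /* same offset on the bus side */
    }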

/linux/arch/sparc/mm/fault_32.c
  367  if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))  in window_overflow_fault()
  376  if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))  in window_underflow_fault()
  388  if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))  in window_ret_fault()
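
All three window-fault handlers ask the same question: does the register-window save area at sp spill across a page boundary? It does exactly when the first and last bytes of the 0x38-byte span round down to different pages:

    /* nonzero iff [sp, sp + 0x38] touches two pages */
    static int window_crosses_page(unsigned long sp)
    {
            return ((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK);
    }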

/linux/drivers/infiniband/hw/mlx4/doorbell.c
   56  if (page->user_virt == (virt & PAGE_MASK))  in mlx4_ib_db_map_user()
   65  page->user_virt = (virt & PAGE_MASK);  in mlx4_ib_db_map_user()
   67  page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,  in mlx4_ib_db_map_user()
   79  (virt & ~PAGE_MASK);  in mlx4_ib_db_map_user()
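
mlx4_ib_db_map_user() caches pinned user pages keyed by the page-aligned user address; the doorbell record itself sits at the intra-page offset. A standalone sketch of the lookup with a made-up db_page type:

    struct db_page {
            unsigned long user_virt;  /* page-aligned user address (the key) */
            void *kaddr;              /* kernel mapping of that page         */
    };

    static void *db_lookup(const struct db_page *pg, unsigned long virt)
    {
            if (pg->user_virt == (virt & PAGE_MASK))
                    return (char *)pg->kaddr + (virt & ~PAGE_MASK);
            return NULL;  /* miss: the caller pins and maps a new page */
    }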

/linux/arch/microblaze/pci/indirect_pci.c
  148  resource_size_t base = cfg_addr & PAGE_MASK;  in setup_indirect_pci()
  152  hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);  in setup_indirect_pci()
  153  if ((cfg_data & PAGE_MASK) != base)  in setup_indirect_pci()
  154  mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);  in setup_indirect_pci()
  155  hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);  in setup_indirect_pci()
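
setup_indirect_pci() maps one page for the config-address register and reuses that mapping for the config-data register whenever both fall in the same page. A kernel-style sketch of just that decision (the two globals are placeholders; ioremap() is the real API):

    static void __iomem *addr_reg, *data_reg;

    static void setup_cfg_regs(phys_addr_t cfg_addr, phys_addr_t cfg_data)
    {
            void __iomem *mbase = ioremap(cfg_addr & PAGE_MASK, PAGE_SIZE);

            addr_reg = mbase + (cfg_addr & ~PAGE_MASK);
            /* second register in a different page? map that page too */
            if ((cfg_data & PAGE_MASK) != (cfg_addr & PAGE_MASK))
                    mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
            data_reg = mbase + (cfg_data & ~PAGE_MASK);
    }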

/linux/arch/mips/mm/tlb-r3k.c
   89  start &= PAGE_MASK;  in local_flush_tlb_range()
   91  end &= PAGE_MASK;  in local_flush_tlb_range()
  125  start &= PAGE_MASK;  in local_flush_tlb_kernel_range()
  127  end &= PAGE_MASK;  in local_flush_tlb_kernel_range()
  162  page &= PAGE_MASK;  in local_flush_tlb_page()
  203  address &= PAGE_MASK;  in __update_tlb()

/linux/drivers/infiniband/hw/mlx5/doorbell.c
   57  (page->user_virt == (virt & PAGE_MASK)))  in mlx5_ib_db_map_user()
   66  page->user_virt = (virt & PAGE_MASK);  in mlx5_ib_db_map_user()
   68  page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,  in mlx5_ib_db_map_user()
   82  (virt & ~PAGE_MASK);  in mlx5_ib_db_map_user()

/linux/arch/ia64/mm/ioremap.c
   69  page_base = phys_addr & PAGE_MASK;  in ioremap()
   77  offset = phys_addr & ~PAGE_MASK;  in ioremap()
   78  phys_addr &= PAGE_MASK;  in ioremap()
  121  vunmap((void *) ((unsigned long) addr & PAGE_MASK));  in iounmap()
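
Nearly every ioremap() in this list (ia64 here; powerpc, xtensa, parisc and arm64 below) follows the same three steps: save the intra-page offset, align the physical address down and the size up, then add the offset back to the mapped pointer. iounmap() reverses it by masking the offset off again before freeing. A kernel-style sketch with a hypothetical map_pages():

    extern void __iomem *map_pages(unsigned long phys, unsigned long size);  /* hypothetical */

    static void __iomem *ioremap_sketch(unsigned long phys_addr, unsigned long size)
    {
            unsigned long offset = phys_addr & ~PAGE_MASK;

            phys_addr &= PAGE_MASK;                              /* align base down */
            size = (size + offset + PAGE_SIZE - 1) & PAGE_MASK;  /* cover the tail  */
            return map_pages(phys_addr, size) + offset;
    }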

/linux/arch/powerpc/sysdev/indirect_pci.c
  163  resource_size_t base = cfg_addr & PAGE_MASK;  in setup_indirect_pci()
  167  hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);  in setup_indirect_pci()
  168  if ((cfg_data & PAGE_MASK) != base)  in setup_indirect_pci()
  169  mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);  in setup_indirect_pci()
  170  hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);  in setup_indirect_pci()

/linux/arch/um/kernel/skas/uaccess.c
   79  (addr & ~PAGE_MASK);  in do_op_one_page()
   82  (addr & ~PAGE_MASK);  in do_op_one_page()
  114  while (addr < ((addr + remain) & PAGE_MASK)) {  in buffer_op()
  293  (((unsigned long) addr) & ~PAGE_MASK);  in arch_futex_atomic_op_inuser()
  296  ((unsigned long) addr & ~PAGE_MASK);  in arch_futex_atomic_op_inuser()
  372  uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);  in futex_atomic_cmpxchg_inatomic()
  374  uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);  in futex_atomic_cmpxchg_inatomic()
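
buffer_op() can only touch one page per mapping, so it walks the buffer as a leading partial page, whole pages, then the tail. A standalone sketch of that walk, where op() is a hypothetical per-chunk callback:

    static int for_each_page_chunk(unsigned long addr, unsigned long len,
                                   int (*op)(unsigned long addr, unsigned long n))
    {
            while (len) {
                    /* bytes left in the current page, capped by the remainder */
                    unsigned long chunk = PAGE_SIZE - (addr & ~PAGE_MASK);
                    int ret;

                    if (chunk > len)
                            chunk = len;
                    ret = op(addr, chunk);
                    if (ret)
                            return ret;
                    addr += chunk;
                    len -= chunk;
            }
            return 0;
    }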

/linux/arch/mips/lib/r3k_dump_tlb.c
   48  if ((entryhi & PAGE_MASK) != KSEG0 &&  in dump_tlb()
   58  entryhi & PAGE_MASK,  in dump_tlb()
   60  entrylo0 & PAGE_MASK,  in dump_tlb()
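
On the R3000 both halves of a TLB entry keep their payload in the PAGE_MASK bits: entryhi carries the virtual page number, entrylo0 the physical frame, with ASID and flag bits below. A standalone decoding sketch (field layout simplified):

    #include <stdio.h>

    static void dump_entry(unsigned long entryhi, unsigned long entrylo0)
    {
            printf("va=%#lx asid-bits=%#lx pa=%#lx flag-bits=%#lx\n",
                   entryhi & PAGE_MASK, entryhi & ~PAGE_MASK,
                   entrylo0 & PAGE_MASK, entrylo0 & ~PAGE_MASK);
    }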

/linux/arch/powerpc/mm/ioremap_64.c
   24  paligned = addr & PAGE_MASK;  in __ioremap_caller()
   25  offset = addr & ~PAGE_MASK;  in __ioremap_caller()
   57  addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);  in iounmap()

/linux/arch/powerpc/mm/ioremap_32.c
   30  p = addr & PAGE_MASK;  in __ioremap_caller()
   31  offset = addr & ~PAGE_MASK;  in __ioremap_caller()
   91  vunmap((void *)(PAGE_MASK & (unsigned long)addr));  in iounmap()

/linux/arch/xtensa/mm/ioremap.c
   17  unsigned long offset = paddr & ~PAGE_MASK;  in xtensa_ioremap()
   23  paddr &= PAGE_MASK;  in xtensa_ioremap()
   61  void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);  in xtensa_iounmap()

/linux/arch/m68k/mm/memory.c
  110  pushcl040(paddr & PAGE_MASK);  in cache_clear()
  116  paddr &= PAGE_MASK;  in cache_clear()
  164  paddr &= PAGE_MASK;  in cache_push()

/linux/arch/m68k/mm/cache.c
   50  return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);  in virt_to_phys_slow()
   71  address &= PAGE_MASK;  in flush_icache_user_range()

/linux/drivers/uio/uio_dfl.c
   31  uiomem->addr = r->start & PAGE_MASK;  in uio_dfl_probe()
   32  uiomem->offs = r->start & ~PAGE_MASK;  in uio_dfl_probe()
   34  + PAGE_SIZE - 1) & PAGE_MASK;  in uio_dfl_probe()
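
uio_dfl_probe() exposes an MMIO resource that need not be page aligned, so it records a rounded-down base, the leftover offset, and a rounded-up size. All three alignment idioms in one standalone sketch (region and page_align() are made-up names):

    struct region { unsigned long addr, offs, size; };

    static struct region page_align(unsigned long start, unsigned long len)
    {
            struct region r;

            r.addr = start & PAGE_MASK;                           /* round down   */
            r.offs = start & ~PAGE_MASK;                          /* what was cut */
            r.size = (r.offs + len + PAGE_SIZE - 1) & PAGE_MASK;  /* round up     */
            return r;
    }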

/linux/arch/powerpc/kernel/isa-bridge.c
   44  WARN_ON_ONCE(ISA_IO_BASE & ~PAGE_MASK);  in remap_isa_base()
   45  WARN_ON_ONCE(pa & ~PAGE_MASK);  in remap_isa_base()
   46  WARN_ON_ONCE(size & ~PAGE_MASK);  in remap_isa_base()
  253  if ((cbase & ~PAGE_MASK) || (pbase & ~PAGE_MASK)) {  in isa_bridge_init_non_pci()
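
These are pure alignment checks: x & ~PAGE_MASK is nonzero exactly when x does not sit on a page boundary. As a one-line helper (hypothetical name):

    #define page_aligned(x) (((x) & ~PAGE_MASK) == 0)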

/linux/arch/parisc/mm/ioremap.c
   71  offset = phys_addr & ~PAGE_MASK;  in ioremap()
   72  phys_addr &= PAGE_MASK;  in ioremap()
   95  unsigned long addr = (unsigned long)io_addr & PAGE_MASK;  in iounmap()

/linux/kernel/kexec_core.c
  176  if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))  in sanity_check_segment_list()
  566  destination &= PAGE_MASK;  in kimage_set_destination()
  577  page &= PAGE_MASK;  in kimage_add_page()
  610  boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
  674  destination = entry & PAGE_MASK;  in kimage_dst_used()
  757  old_addr = *old & PAGE_MASK;  in kimage_alloc_page()
  760  *old = addr | (*old & ~PAGE_MASK);  in kimage_alloc_page()
  822  ptr += maddr & ~PAGE_MASK;  in kimage_load_normal_segment()
  824  PAGE_SIZE - (maddr & ~PAGE_MASK));  in kimage_load_normal_segment()
  884  ptr += maddr & ~PAGE_MASK;  in kimage_load_crash_segment()
  [all …]
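
kexec's entry lists exploit the fact that a page-aligned address has zero low bits: kimage_alloc_page() packs flag bits into them (line 760 above) and strips them off again when walking the list. A standalone pack/unpack sketch with made-up names:

    static unsigned long entry_pack(unsigned long page_addr, unsigned long flags)
    {
            return (page_addr & PAGE_MASK) | (flags & ~PAGE_MASK);
    }

    static unsigned long entry_addr(unsigned long e)  { return e & PAGE_MASK;  }
    static unsigned long entry_flags(unsigned long e) { return e & ~PAGE_MASK; }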

/linux/arch/arm64/mm/ioremap.c
   24  unsigned long offset = phys_addr & ~PAGE_MASK;  in __ioremap_caller()
   33  phys_addr &= PAGE_MASK;  in __ioremap_caller()
   73  unsigned long addr = (unsigned long)io_addr & PAGE_MASK;  in iounmap()