/linux/mm/

truncate.c
      69  if (shmem_mapping(mapping))    in truncate_exceptional_pvec_entries()
      79  dax = dax_mapping(mapping);    in truncate_exceptional_pvec_entries()
     119  if (shmem_mapping(mapping) || dax_mapping(mapping))    in invalidate_exceptional_entry()
     135  if (dax_mapping(mapping))    in invalidate_exceptional_entry2()
     210  if (page->mapping != mapping)    in invalidate_complete_page()
     225  if (page->mapping != mapping)    in truncate_inode_page()
     238  if (!mapping)    in generic_error_remove_page()
     259  if (!mapping)    in invalidate_inode_page()
     574  if (page->mapping != mapping)    in invalidate_complete_page2()
     607  if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)    in do_launder_page()
     [all …]

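The repeated "page->mapping != mapping" checks above are the page-cache revalidation idiom: a page found by lookup can be truncated before the caller locks it, so its ->mapping must be re-checked under the page lock. A minimal sketch of the pattern; my_lock_and_validate() is a hypothetical helper, not a function from truncate.c:

    #include <linux/pagemap.h>

    /* Hypothetical helper: lock a page cache page and confirm it still
     * belongs to @mapping, i.e. it was not truncated while unlocked. */
    static bool my_lock_and_validate(struct page *page,
                                     struct address_space *mapping)
    {
            lock_page(page);
            if (page->mapping != mapping) {
                    /* lost a race with truncation */
                    unlock_page(page);
                    return false;
            }
            return true;
    }
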
filemap.c
     226  struct address_space *mapping = page->mapping;    in __delete_from_page_cache() local
     854  struct address_space *mapping = old->mapping;    in replace_page_cache_page() local
     864  new->mapping = mapping;    in replace_page_cache_page()
     902  folio->mapping = mapping;    in __filemap_add_folio()
    1925  if (unlikely(folio->mapping != mapping)) {    in __filemap_get_folio()
    2111  if (page->mapping != mapping || PageWriteback(page))    in find_lock_entries()
    2811  if (unlikely(page->mapping != mapping))    in page_seek_hole_data()
    3258  if (page->mapping != mapping)    in next_uptodate_page()
    3361  if (page->mapping != mapping) {    in filemap_page_mkwrite()
    3941  struct address_space * const mapping = page->mapping;    in try_to_release_page() local
    [all …]

readahead.c
      53  page->mapping = mapping;    in read_cache_pages_invalidate_page()
      55  page->mapping = NULL;    in read_cache_pages_invalidate_page()
      98  readahead_gfp_mask(mapping))) {    in read_cache_pages()
     176  struct address_space *mapping = ractl->mapping;    in page_cache_ra_unbounded() local
     194  filemap_invalidate_lock_shared(mapping);    in page_cache_ra_unbounded()
     218  if (mapping->a_ops->readpages) {    in page_cache_ra_unbounded()
     239  filemap_invalidate_unlock_shared(mapping);    in page_cache_ra_unbounded()
     278  struct address_space *mapping = ractl->mapping;    in force_page_cache_ra() local
     283  if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&    in force_page_cache_ra()
     284  !mapping->a_ops->readahead))    in force_page_cache_ra()
     [all …]

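For context, force_page_cache_ra() above bails out unless the filesystem implements ->readpage, ->readpages, or ->readahead. A minimal sketch of a synchronous ->readahead implementation, assuming a hypothetical my_fill_page() that reads one page:

    #include <linux/pagemap.h>

    static int my_fill_page(struct inode *inode, struct page *page); /* hypothetical */

    static void my_readahead(struct readahead_control *ractl)
    {
            struct inode *inode = ractl->mapping->host;
            struct page *page;

            /* readahead_page() hands back each page locked and with a
             * reference held; we must unlock it and drop the reference. */
            while ((page = readahead_page(ractl))) {
                    if (my_fill_page(inode, page) == 0)
                            SetPageUptodate(page);
                    unlock_page(page);
                    put_page(page);
            }
    }
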
page-writeback.c
    2232  if (unlikely(page->mapping != mapping)) {    in write_cache_pages()
    2346  ret = write_cache_pages(mapping, wbc, __writepage, mapping);    in generic_writepages()
    2364  ret = mapping->a_ops->writepages(mapping, wbc);    in do_writepages()
    2403  struct address_space *mapping = folio->mapping;    in folio_write_one() local
    2546  if (mapping->host) {    in filemap_dirty_folio()
    2568  struct address_space *mapping = folio->mapping;    in folio_account_redirty() local
    2570  if (mapping && mapping_can_writeback(mapping)) {    in folio_account_redirty()
    2727  if (mapping && mapping_can_writeback(mapping)) {    in folio_clear_dirty_for_io()
    2807  if (mapping && mapping_use_writeback_tags(mapping)) {    in __folio_end_writeback()
    2828  if (mapping->host && !mapping_tagged(mapping,    in __folio_end_writeback()
    [all …]

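The write_cache_pages() call at line 2346 is the generic dirty-page walk; a filesystem's ->writepages can reuse it by supplying a per-page callback, as generic_writepages() does with __writepage(). A minimal sketch, with my_writepage_cb()/my_writepages() as hypothetical names:

    #include <linux/writeback.h>
    #include <linux/pagemap.h>

    /* Called once per dirty page, with the page locked; hand it to
     * ->writepage, which starts I/O and unlocks the page. */
    static int my_writepage_cb(struct page *page,
                               struct writeback_control *wbc, void *data)
    {
            struct address_space *mapping = data;

            return mapping->a_ops->writepage(page, wbc);
    }

    static int my_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
    {
            return write_cache_pages(mapping, wbc, my_writepage_cb, mapping);
    }
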
/linux/drivers/gpu/drm/tegra/

uapi.c
      21  dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,    in tegra_drm_mapping_release()
      24  host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);    in tegra_drm_mapping_release()
      27  kfree(mapping);    in tegra_drm_mapping_release()
     175  mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);    in tegra_drm_ioctl_channel_map()
     192  host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova);    in tegra_drm_ioctl_channel_map()
     211  mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL);    in tegra_drm_ioctl_channel_map()
     217  err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction,    in tegra_drm_ioctl_channel_map()
     222  mapping->iova = sg_dma_address(mapping->sgt->sgl);    in tegra_drm_ioctl_channel_map()
     225  mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;    in tegra_drm_ioctl_channel_map()
     238  dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,    in tegra_drm_ioctl_channel_map()
     [all …]

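tegra_drm_ioctl_channel_map() pins the buffer to get an sg_table, DMA-maps it, and reads the IOVA back from the first segment (line 222). A sketch of the generic half of that idiom; my_map_buffer() is a hypothetical wrapper, not a tegra function:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int my_map_buffer(struct device *dev, struct sg_table *sgt,
                             dma_addr_t *iova)
    {
            int err;

            err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
            if (err)
                    return err;

            /* like the tegra code, this assumes the IOMMU merged the
             * buffer into one contiguous IOVA range */
            *iova = sg_dma_address(sgt->sgl);
            return 0;
    }
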
/linux/include/linux/

pagemap.h
      23  return xa_empty(&mapping->i_pages);    in mapping_empty()
     113  if (mapping->host)    in mapping_set_error()
     135  return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);    in mapping_unevictable()
     160  return mapping->gfp_mask;    in mapping_gfp_mask()
     213  atomic_inc(&mapping->nr_thps);    in filemap_nr_thps_inc()
     252  return folio->mapping;    in folio_file_mapping()
     283  return folio->mapping->host;    in folio_inode()
     511  mapping_gfp_mask(mapping));    in grab_cache_page_nowait()
     637  return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));    in grab_cache_page()
     994  .mapping = m, \
     [all …]

io-mapping.h
      58  io_mapping_fini(struct io_mapping *mapping)    in io_mapping_fini() argument
      60  iomap_free(mapping->base, mapping->size);    in io_mapping_fini()
      70  BUG_ON(offset >= mapping->size);    in io_mapping_map_atomic_wc()
      71  phys_addr = mapping->base + offset;    in io_mapping_map_atomic_wc()
      90  BUG_ON(offset >= mapping->size);    in io_mapping_map_local_wc()
      91  phys_addr = mapping->base + offset;    in io_mapping_map_local_wc()
     107  BUG_ON(offset >= mapping->size);    in io_mapping_map_wc()
     108  phys_addr = mapping->base + offset;    in io_mapping_map_wc()
     141  io_mapping_fini(struct io_mapping *mapping)    in io_mapping_fini() argument
     143  iounmap(mapping->iomem);    in io_mapping_fini()
     [all …]

tpm_eventlog.h
     166  void *mapping = NULL;    in __calc_tpm2_event_size() local
     188  if (!mapping) {    in __calc_tpm2_event_size()
     193  mapping = marker_start;    in __calc_tpm2_event_size()
     233  TPM_MEMUNMAP(mapping, mapping_size);    in __calc_tpm2_event_size()
     237  if (!mapping) {    in __calc_tpm2_event_size()
     242  mapping = marker;    in __calc_tpm2_event_size()
     245  memcpy(&halg, mapping, halg_size);    in __calc_tpm2_event_size()
     267  TPM_MEMUNMAP(mapping, mapping_size);    in __calc_tpm2_event_size()
     271  if (!mapping) {    in __calc_tpm2_event_size()
     276  mapping = marker;    in __calc_tpm2_event_size()
     [all …]

cleancache.h
      52  static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)    in cleancache_fs_enabled_mapping() argument
      54  return mapping->host->i_sb->cleancache_poolid >= 0;    in cleancache_fs_enabled_mapping()
      58  return cleancache_fs_enabled_mapping(page->mapping);    in cleancache_fs_enabled()
     104  static inline void cleancache_invalidate_page(struct address_space *mapping,    in cleancache_invalidate_page() argument
     108  if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))    in cleancache_invalidate_page()
     109  __cleancache_invalidate_page(mapping, page);    in cleancache_invalidate_page()
     112  static inline void cleancache_invalidate_inode(struct address_space *mapping)    in cleancache_invalidate_inode() argument
     114  if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))    in cleancache_invalidate_inode()
     115  __cleancache_invalidate_inode(mapping);    in cleancache_invalidate_inode()

secretmem.h
      11  struct address_space *mapping;    in page_is_secretmem() local
      23  mapping = (struct address_space *)    in page_is_secretmem()
      24  ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);    in page_is_secretmem()
      26  if (!mapping || mapping != page->mapping)    in page_is_secretmem()
      29  return mapping->a_ops == &secretmem_aops;    in page_is_secretmem()

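page_is_secretmem() strips the low tag bits from page->mapping before using it: for anonymous or movable pages the field doubles as a tagged pointer. A hedged illustration of just the masking step, with my_raw_mapping() as a hypothetical name:

    #include <linux/mm.h>
    #include <linux/page-flags.h>

    /* page->mapping may carry PAGE_MAPPING_ANON / PAGE_MAPPING_MOVABLE
     * in its low bits; mask them off before treating it as a pointer. */
    static struct address_space *my_raw_mapping(struct page *page)
    {
            return (struct address_space *)
                    ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
    }
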
/linux/drivers/gpu/drm/panfrost/

panfrost_gem.c
      65  mapping = iter;    in panfrost_gem_mapping_get()
      71  return mapping;    in panfrost_gem_mapping_get()
      77  if (mapping->active)    in panfrost_gem_teardown_mapping()
      78  panfrost_mmu_unmap(mapping);    in panfrost_gem_teardown_mapping()
      95  kfree(mapping);    in panfrost_gem_mapping_release()
     100  if (!mapping)    in panfrost_gem_mapping_put()
     124  mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);    in panfrost_gem_open()
     125  if (!mapping)    in panfrost_gem_open()
     131  mapping->obj = bo;    in panfrost_gem_open()
     146  ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,    in panfrost_gem_open()
     [all …]

/linux/drivers/media/usb/uvc/

uvc_ctrl.c
     949  s32 value = mapping->get(mapping, UVC_GET_CUR, data);    in __uvc_ctrl_get_value()
    1168  v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,    in __uvc_query_v4l2_ctrl()
    1266  bitmap = mapping->get(mapping, UVC_GET_RES,    in uvc_query_v4l2_menu()
    1730  min = mapping->get(mapping, UVC_GET_MIN,    in uvc_ctrl_set()
    1732  max = mapping->get(mapping, UVC_GET_MAX,    in uvc_ctrl_set()
    1734  step = mapping->get(mapping, UVC_GET_RES,    in uvc_ctrl_set()
    1769  step = mapping->get(mapping, UVC_GET_RES,    in uvc_ctrl_set()
    1810  mapping->set(mapping, value,    in uvc_ctrl_set()
    2234  uvc_map_get_name(mapping), mapping->id);    in uvc_ctrl_add_mapping()
    2281  uvc_map_get_name(mapping), mapping->id);    in uvc_ctrl_add_mapping()
    [all …]

/linux/fs/

dax.c
     356  page->mapping = mapping;    in dax_associate_entry()
     373  WARN_ON_ONCE(page->mapping && page->mapping != mapping);    in dax_disassociate_entry()
     374  page->mapping = NULL;    in dax_disassociate_entry()
     408  struct address_space *mapping = READ_ONCE(page->mapping);    in dax_lock_page() local
     411  if (!mapping || !dax_mapping(mapping))    in dax_lock_page()
     427  if (mapping != page->mapping) {    in dax_lock_page()
     449  struct address_space *mapping = page->mapping;    in dax_unlock_page() local
     607  if (!dax_mapping(mapping) || !mapping_mapped(mapping))    in dax_layout_busy_page_range()
     812  i_mmap_lock_read(mapping);    in dax_entry_mkclean()
    1430  .inode = mapping->host,    in dax_iomap_pte_fault()
    [all …]

buffer.c
     549  &mapping->private_list);    in sync_mapping_buffers()
     584  &mapping->private_list);    in mark_buffer_dirty_inode()
     585  bh->b_assoc_map = mapping;    in mark_buffer_dirty_inode()
     621  if (unlikely(!mapping))    in __set_page_dirty_buffers()
     687  mapping = bh->b_assoc_map;    in fsync_buffers_list()
     694  bh->b_assoc_map = mapping;    in fsync_buffers_list()
     726  mapping = bh->b_assoc_map;    in fsync_buffers_list()
     733  &mapping->private_list);    in fsync_buffers_list()
    1107  if (mapping)    in mark_buffer_dirty()
    1111  if (mapping)    in mark_buffer_dirty()
    [all …]

/linux/drivers/gpu/drm/exynos/

exynos_drm_dma.c
      69  ret = iommu_attach_device(priv->mapping, subdrv_dev);    in drm_iommu_attach_device()
      93  iommu_detach_device(priv->mapping, subdrv_dev);    in drm_iommu_detach_device()
     110  if (!priv->mapping) {    in exynos_drm_register_dma()
     111  void *mapping;    in exynos_drm_register_dma() local
     117  mapping = iommu_get_domain_for_dev(priv->dma_dev);    in exynos_drm_register_dma()
     119  mapping = ERR_PTR(-ENODEV);    in exynos_drm_register_dma()
     121  if (IS_ERR(mapping))    in exynos_drm_register_dma()
     122  return PTR_ERR(mapping);    in exynos_drm_register_dma()
     123  priv->mapping = mapping;    in exynos_drm_register_dma()
     143  arm_iommu_release_mapping(priv->mapping);    in exynos_drm_cleanup_dma()
     [all …]

/linux/arch/arm/mm/

dma-mapping.c
    1174  BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);    in __free_iova()
    2064  if (!mapping)    in arm_iommu_create_mapping()
    2079  mapping->base = base;    in arm_iommu_create_mapping()
    2089  return mapping;    in arm_iommu_create_mapping()
    2095  kfree(mapping);    in arm_iommu_create_mapping()
    2111  kfree(mapping);    in release_iommu_mapping()
    2118  if (mapping->nr_bitmaps >= mapping->extensions)    in extend_iommu_mapping()
    2122  mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,    in extend_iommu_mapping()
    2134  if (mapping)    in arm_iommu_release_mapping()
    2194  if (!mapping) {    in arm_iommu_detach_device()
    [all …]

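arm_iommu_create_mapping(), extend_iommu_mapping(), and friends manage the per-device IOVA bitmap behind the 32-bit ARM DMA API. A sketch of typical driver-side usage; the base address, size, and my_setup_iommu() name are hypothetical:

    #include <linux/platform_device.h>
    #include <linux/sizes.h>
    #include <linux/err.h>
    #include <asm/dma-iommu.h>

    static int my_setup_iommu(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;
            int ret;

            /* 64 MiB of IOVA space starting at a device-specific base */
            mapping = arm_iommu_create_mapping(&platform_bus_type,
                                               0x80000000, SZ_64M);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            ret = arm_iommu_attach_device(dev, mapping);
            if (ret)
                    arm_iommu_release_mapping(mapping);
            return ret;
    }
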
flush.c
     251  flush_dcache_mmap_lock(mapping);    in __flush_dcache_aliases()
     265  flush_dcache_mmap_unlock(mapping);    in __flush_dcache_aliases()
     273  struct address_space *mapping;    in __sync_icache_dcache() local
     284  mapping = page_mapping_file(page);    in __sync_icache_dcache()
     286  mapping = NULL;    in __sync_icache_dcache()
     289  __flush_dcache_page(mapping, page);    in __sync_icache_dcache()
     317  struct address_space *mapping;    in flush_dcache_page() local
     332  mapping = page_mapping_file(page);    in flush_dcache_page()
     335  mapping && !page_mapcount(page))    in flush_dcache_page()
     339  if (mapping && cache_is_vivt())    in flush_dcache_page()
     [all …]

/linux/drivers/gpu/drm/etnaviv/

etnaviv_gem.c
     222  return mapping;    in etnaviv_gem_get_vram_mapping()
     234  mapping->use -= 1;    in etnaviv_gem_mapping_unreference()
     251  if (mapping) {    in etnaviv_gem_mapping_get()
     265  if (mapping)    in etnaviv_gem_mapping_get()
     284  if (!mapping) {    in etnaviv_gem_mapping_get()
     285  mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);    in etnaviv_gem_mapping_get()
     286  if (!mapping) {    in etnaviv_gem_mapping_get()
     298  mapping->use = 1;    in etnaviv_gem_mapping_get()
     305  kfree(mapping);    in etnaviv_gem_mapping_get()
     318  return mapping;    in etnaviv_gem_mapping_get()
     [all …]

etnaviv_mmu.c
     123  struct etnaviv_vram_mapping *mapping)    in etnaviv_iommu_remove_mapping() argument
     247  mapping->iova = iova;    in etnaviv_iommu_map_gem()
     254  node = &mapping->vram_node;    in etnaviv_iommu_map_gem()
     265  mapping->iova = node->start;    in etnaviv_iommu_map_gem()
     285  WARN_ON(mapping->use);    in etnaviv_iommu_unmap_gem()
     293  list_del(&mapping->mmu_node);    in etnaviv_iommu_unmap_gem()
     361  if (mapping->use > 0) {    in etnaviv_iommu_get_suballoc_va()
     362  mapping->use++;    in etnaviv_iommu_get_suballoc_va()
     385  mapping->iova = node->start;    in etnaviv_iommu_get_suballoc_va()
     398  mapping->use = 1;    in etnaviv_iommu_get_suballoc_va()
     [all …]

/linux/fs/gfs2/

aops.c
      91  struct inode *inode = page->mapping->host;    in gfs2_writepage()
     153  struct inode *inode = page->mapping->host;    in __gfs2_jdata_writepage()
     179  struct inode *inode = page->mapping->host;    in gfs2_jdata_writepage()
     239  struct inode *inode = mapping->host;    in gfs2_write_jdata_pagevec()
     256  if (unlikely(page->mapping != mapping)) {    in gfs2_write_jdata_pagevec()
     396  mapping->writeback_index = done_index;    in gfs2_write_cache_jdata()
     416  ret = gfs2_write_cache_jdata(mapping, wbc);    in gfs2_jdata_writepages()
     420  ret = gfs2_write_cache_jdata(mapping, wbc);    in gfs2_jdata_writepages()
     470  struct inode *inode = page->mapping->host;    in __gfs2_readpage()
     558  struct inode *inode = rac->mapping->host;    in gfs2_readahead()
     [all …]

/linux/drivers/sh/clk/

core.c
     340  struct clk_mapping *mapping = clk->mapping;    in clk_establish_mapping() local
     345  if (!mapping) {    in clk_establish_mapping()
     361  mapping = clkp->mapping;    in clk_establish_mapping()
     362  BUG_ON(!mapping);    in clk_establish_mapping()
     368  if (!mapping->base && mapping->phys) {    in clk_establish_mapping()
     371  mapping->base = ioremap(mapping->phys, mapping->len);    in clk_establish_mapping()
     378  kref_get(&mapping->ref);    in clk_establish_mapping()
     381  clk->mapping = mapping;    in clk_establish_mapping()
     394  iounmap(mapping->base);    in clk_destroy_mapping()
     399  struct clk_mapping *mapping = clk->mapping;    in clk_teardown_mapping() local
     [all …]

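clk_establish_mapping() maps the clock's register window lazily (ioremap() on first use at line 371) and shares it between clocks via a kref. The same shape in isolation; struct my_mapping is a hypothetical stand-in for struct clk_mapping:

    #include <linux/kernel.h>
    #include <linux/io.h>
    #include <linux/kref.h>
    #include <linux/errno.h>

    struct my_mapping {
            phys_addr_t      phys;
            unsigned long    len;
            void __iomem    *base;
            struct kref      ref;
    };

    static int my_establish_mapping(struct my_mapping *m)
    {
            if (!m->base && m->phys) {          /* first user: map it */
                    m->base = ioremap(m->phys, m->len);
                    if (!m->base)
                            return -ENXIO;
                    kref_init(&m->ref);
                    return 0;
            }
            kref_get(&m->ref);                  /* later users share it */
            return 0;
    }

    static void my_destroy_mapping(struct kref *ref)
    {
            struct my_mapping *m = container_of(ref, struct my_mapping, ref);

            iounmap(m->base);
    }

    static void my_teardown_mapping(struct my_mapping *m)
    {
            kref_put(&m->ref, my_destroy_mapping);
    }
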
/linux/arch/nios2/mm/

cacheflush.c
      82  flush_dcache_mmap_lock(mapping);    in flush_aliases()
      95  flush_dcache_mmap_unlock(mapping);    in flush_aliases()
     175  struct address_space *mapping;    in flush_dcache_page() local
     184  mapping = page_mapping_file(page);    in flush_dcache_page()
     187  if (mapping && !mapping_mapped(mapping)) {    in flush_dcache_page()
     191  if (mapping) {    in flush_dcache_page()
     193  flush_aliases(mapping, page);    in flush_dcache_page()
     207  struct address_space *mapping;    in update_mmu_cache() local
     222  mapping = page_mapping_file(page);    in update_mmu_cache()
     226  if(mapping)    in update_mmu_cache()
     [all …]

/linux/Documentation/driver-api/

io-mapping.rst
       8  The io_mapping functions in linux/io-mapping.h provide an abstraction for
       9  efficiently mapping small regions of an I/O device to the CPU. The initial
      14  A mapping object is created during driver initialization using::
      20  mappable, while 'size' indicates how large a mapping region to
      23  This _wc variant provides a mapping which may only be used with
      31  void *io_mapping_map_local_wc(struct io_mapping *mapping,
      34  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
      37  'offset' is the offset within the defined mapping region. Accessing
      72  undoes the side effects of the mapping functions.
      77  void *io_mapping_map_wc(struct io_mapping *mapping,
      [all …]

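Putting the documented calls together, a minimal lifecycle sketch; bar_start, bar_len, the zero offset, and my_use_bar() are hypothetical values a driver would take from its device:

    #include <linux/io.h>
    #include <linux/io-mapping.h>

    static int my_use_bar(resource_size_t bar_start, unsigned long bar_len)
    {
            struct io_mapping *mapping;
            void __iomem *vaddr;

            mapping = io_mapping_create_wc(bar_start, bar_len);
            if (!mapping)
                    return -ENOMEM;

            /* short-lived mapping of one page within the region;
             * must be unmapped from the same context it was mapped in */
            vaddr = io_mapping_map_local_wc(mapping, 0);
            writel(1, vaddr);                  /* hypothetical register poke */
            io_mapping_unmap_local(vaddr);

            io_mapping_free(mapping);
            return 0;
    }
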
/linux/drivers/net/ethernet/broadcom/bnxt/

bnxt_xdp.c
      25  dma_addr_t mapping, u32 len)    in bnxt_xmit_bd() argument
      40  txbd->tx_bd_haddr = cpu_to_le64(mapping);    in bnxt_xmit_bd()
      52  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);    in __bnxt_xmit_xdp()
      59  dma_addr_t mapping, u32 len,    in __bnxt_xmit_xdp_redirect() argument
      64  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);    in __bnxt_xmit_xdp_redirect()
      67  dma_unmap_addr_set(tx_buf, mapping, mapping);    in __bnxt_xmit_xdp_redirect()
      88  dma_unmap_addr(tx_buf, mapping),    in bnxt_tx_int_xdp()
     119  dma_addr_t mapping;    in bnxt_rx_xdp() local
     132  mapping = rx_buf->mapping - bp->rx_dma_offset;    in bnxt_rx_xdp()
     178  dma_unmap_page_attrs(&pdev->dev, mapping,    in bnxt_rx_xdp()
     [all …]

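bnxt_xmit_bd() receives an already-mapped DMA address and stores it little-endian in the descriptor (line 40). A sketch of the generic map-then-fill idiom it depends on; struct my_tx_desc and my_map_tx() are hypothetical stand-ins for the hardware descriptor and driver code:

    #include <linux/dma-mapping.h>

    struct my_tx_desc {
            __le64 addr;
            __le32 len;
    };

    static int my_map_tx(struct device *dev, struct my_tx_desc *desc,
                         void *data, unsigned int len)
    {
            dma_addr_t mapping;

            mapping = dma_map_single(dev, data, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, mapping))   /* always check first */
                    return -ENOMEM;

            desc->addr = cpu_to_le64(mapping);     /* cf. txbd->tx_bd_haddr */
            desc->len  = cpu_to_le32(len);
            return 0;
    }
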
/linux/drivers/net/wireless/marvell/mwifiex/

util.h
      69  struct mwifiex_dma_mapping *mapping)    in mwifiex_store_mapping() argument
      73  memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));    in mwifiex_store_mapping()
      77  struct mwifiex_dma_mapping *mapping)    in mwifiex_get_mapping() argument
      81  memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));    in mwifiex_get_mapping()
      86  struct mwifiex_dma_mapping mapping;    in MWIFIEX_SKB_DMA_ADDR() local
      88  mwifiex_get_mapping(skb, &mapping);    in MWIFIEX_SKB_DMA_ADDR()
      90  return mapping.addr;    in MWIFIEX_SKB_DMA_ADDR()

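mwifiex_store_mapping()/mwifiex_get_mapping() stash the DMA address in the skb control buffer so it survives until unmap time. The bare idiom in isolation; struct my_dma_mapping and the my_* helpers are hypothetical:

    #include <linux/skbuff.h>
    #include <linux/build_bug.h>

    struct my_dma_mapping {
            dma_addr_t addr;
            size_t     len;
    };

    static void my_store_mapping(struct sk_buff *skb,
                                 const struct my_dma_mapping *m)
    {
            /* skb->cb is 48 bytes of scratch owned by the current layer */
            BUILD_BUG_ON(sizeof(*m) > sizeof(skb->cb));
            memcpy(skb->cb, m, sizeof(*m));
    }

    static dma_addr_t my_skb_dma_addr(struct sk_buff *skb)
    {
            struct my_dma_mapping m;

            memcpy(&m, skb->cb, sizeof(m));
            return m.addr;
    }
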