Searched refs:pages (Results 1 – 25 of 1233) sorted by relevance

/linux/net/ceph/
pagevec.c
19 set_page_dirty_lock(pages[i]); in ceph_put_page_vector()
20 put_page(pages[i]); in ceph_put_page_vector()
22 kvfree(pages); in ceph_put_page_vector()
31 __free_pages(pages[i], 0); in ceph_release_page_vector()
32 kfree(pages); in ceph_release_page_vector()
41 struct page **pages; in ceph_alloc_page_vector() local
44 pages = kmalloc_array(num_pages, sizeof(*pages), flags); in ceph_alloc_page_vector()
45 if (!pages) in ceph_alloc_page_vector()
49 if (pages[i] == NULL) { in ceph_alloc_page_vector()
50 ceph_release_page_vector(pages, i); in ceph_alloc_page_vector()
[all …]
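
The ceph hits above show the standard allocate/release idiom for a vector of order-0 pages: kmalloc_array() for the pointer array, alloc_page() per entry, and a release helper that also unwinds partial allocations. A minimal sketch of that pattern follows, with hypothetical demo_* names rather than the ceph helpers themselves:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

static void demo_release_page_vector(struct page **pages, unsigned int num_pages)
{
	unsigned int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);	/* all entries are order-0 */
	kfree(pages);
}

static struct page **demo_alloc_page_vector(unsigned int num_pages, gfp_t flags)
{
	struct page **pages;
	unsigned int i;

	pages = kmalloc_array(num_pages, sizeof(*pages), flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(flags);
		if (!pages[i]) {
			/* release only the i pages allocated so far */
			demo_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}

Passing the loop index i to the release helper on failure is the detail the ceph_release_page_vector(pages, i) hit at line 50 illustrates: it frees exactly the entries that were populated.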
/linux/fs/isofs/
compress.c
68 if (!pages[i]) in zisofs_uncompress_block()
122 if (pages[curpage]) { in zisofs_uncompress_block()
175 if (pages[curpage]) { in zisofs_uncompress_block()
309 struct page **pages; in zisofs_readpage() local
336 if (!pages) { in zisofs_readpage()
345 if (pages[i]) { in zisofs_readpage()
347 kmap(pages[i]); in zisofs_readpage()
355 if (pages[i]) { in zisofs_readpage()
359 kunmap(pages[i]); in zisofs_readpage()
362 put_page(pages[i]); in zisofs_readpage()
[all …]
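
The zisofs hits follow a second common shape: a page array that may contain holes (NULL entries), where each present page is kmap()ed for the duration of the work and then kunmap()ed and released. A hedged sketch of that loop, not the zisofs code itself:

#include <linux/highmem.h>
#include <linux/mm.h>

static void demo_process_page_array(struct page **pages, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++)
		if (pages[i])
			kmap(pages[i]);	/* holes are legal, as in zisofs_readpage() */

	/* ... decompress or copy into the mapped pages here ... */

	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;
		kunmap(pages[i]);
		put_page(pages[i]);	/* drop the reference taken at lookup time */
	}
}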
/linux/mm/
percpu-vm.c
34 static struct page **pages; in pcpu_get_pages() local
39 if (!pages) in pcpu_get_pages()
41 return pages; in pcpu_get_pages()
165 pages[pcpu_page_idx(cpu, i)] = page; in pcpu_unmap_pages()
197 PAGE_KERNEL, pages, PAGE_SHIFT); in __pcpu_map_pages()
279 struct page **pages; in pcpu_populate_chunk() local
281 pages = pcpu_get_pages(); in pcpu_populate_chunk()
282 if (!pages) in pcpu_populate_chunk()
315 struct page **pages; in pcpu_depopulate_chunk() local
322 pages = pcpu_get_pages(); in pcpu_depopulate_chunk()
[all …]
gup_test.c
18 put_page(pages[i]); in put_back_pages()
31 put_page(pages[i]); in put_back_pages()
49 page = pages[i]; in verify_dma_pinned()
104 struct page **pages; in __gup_test_ioctl() local
114 if (!pages) in __gup_test_ioctl()
138 pages + i); in __gup_test_ioctl()
146 pages + i); in __gup_test_ioctl()
155 pages + i, NULL); in __gup_test_ioctl()
160 pages + i, NULL); in __gup_test_ioctl()
163 pages + i, NULL); in __gup_test_ioctl()
[all …]
gup.c
1116 pages ? &pages[i] : NULL); in __get_user_pages()
1185 if (pages) { in __get_user_pages()
1377 pages += ret; in __get_user_pages_locked()
1423 pages++; in __get_user_pages_locked()
1644 if (pages) { in __get_user_pages_locked()
1646 if (pages[i]) in __get_user_pages_locked()
2650 pages, nr)) in gup_pmd_range()
2683 pages, nr)) in gup_pud_range()
2869 pages); in internal_get_user_pages_fast()
2916 pages); in get_user_pages_fast_only()
[all …]
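
gup.c and gup_test.c together show the get_user_pages_fast() round trip: pin, use, mark dirty, release. A minimal sketch under the assumption that the caller wrote to the pages (the demo_ name and error handling are illustrative, not kernel code):

#include <linux/mm.h>
#include <linux/slab.h>

static int demo_gup_roundtrip(unsigned long start, int nr_pages)
{
	struct page **pages;
	int i, pinned;

	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}

	/* ... read from / write to the pinned user pages ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);	/* we may have dirtied them */
		put_page(pages[i]);
	}
	kfree(pages);
	return 0;
}

Note that get_user_pages_fast() can return fewer pages than requested; real callers, like __gup_test_ioctl() above, must cope with a short count.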
/linux/drivers/gpu/drm/i915/gem/selftests/
huge_gem_object.c
12 struct sg_table *pages) in huge_free_pages() argument
24 sg_free_table(pages); in huge_free_pages()
25 kfree(pages); in huge_free_pages()
34 struct sg_table *pages; in huge_get_pages() local
37 pages = kmalloc(sizeof(*pages), GFP); in huge_get_pages()
38 if (!pages) in huge_get_pages()
42 kfree(pages); in huge_get_pages()
46 sg = pages->sgl; in huge_get_pages()
64 src = pages->sgl; in huge_get_pages()
76 huge_free_pages(obj, pages); in huge_get_pages()
[all …]
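
The i915 selftest hits show an sg_table managed in two steps: the table struct is kmalloc()ed, its entries come from sg_alloc_table(), and teardown must undo both. A sketch of that lifecycle, assuming hypothetical demo_ wrappers:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *demo_get_sg_table(unsigned int nents)
{
	struct sg_table *pages;

	pages = kmalloc(sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	if (sg_alloc_table(pages, nents, GFP_KERNEL)) {
		kfree(pages);	/* entry allocation failed; drop the struct too */
		return NULL;
	}
	return pages;
}

static void demo_free_sg_table(struct sg_table *pages)
{
	sg_free_table(pages);	/* frees the scatterlist entries */
	kfree(pages);		/* then the table struct itself */
}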
/linux/fs/erofs/
pcpubuf.c
15 struct page **pages; member
67 pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL); in erofs_pcpubuf_growsize()
68 if (!pages) { in erofs_pcpubuf_growsize()
75 if (!pages[i]) { in erofs_pcpubuf_growsize()
77 oldpages = pages; in erofs_pcpubuf_growsize()
84 oldpages = pages; in erofs_pcpubuf_growsize()
91 pcb->pages = pages; in erofs_pcpubuf_growsize()
139 if (!pcb->pages) in erofs_pcpubuf_exit()
143 if (pcb->pages[i]) in erofs_pcpubuf_exit()
145 kfree(pcb->pages); in erofs_pcpubuf_exit()
[all …]
/linux/drivers/xen/
xlate_mmu.c
71 struct page **pages; member
148 struct page **pages) in xen_xlate_remap_gfn_array() argument
163 data.pages = pages; in xen_xlate_remap_gfn_array()
217 struct page **pages; in xen_xlate_map_ballooned_pages() local
226 pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL); in xen_xlate_map_ballooned_pages()
227 if (!pages) in xen_xlate_map_ballooned_pages()
232 kfree(pages); in xen_xlate_map_ballooned_pages()
239 kfree(pages); in xen_xlate_map_ballooned_pages()
253 kfree(pages); in xen_xlate_map_ballooned_pages()
257 kfree(pages); in xen_xlate_map_ballooned_pages()
[all …]
/linux/kernel/dma/
remap.c
15 return area->pages; in dma_common_find_pages()
22 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
27 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, in dma_common_pages_remap()
30 find_vm_area(vaddr)->pages = pages; in dma_common_pages_remap()
42 struct page **pages; in dma_common_contiguous_remap() local
46 pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
47 if (!pages) in dma_common_contiguous_remap()
50 pages[i] = nth_page(page, i); in dma_common_contiguous_remap()
51 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
52 kfree(pages); in dma_common_contiguous_remap()
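
dma_common_contiguous_remap() demonstrates a neat trick visible in the hits above: to vmap() a physically contiguous region, build a temporary page array with nth_page(), map it, and free the array immediately, since the mapping lives in the page tables rather than in the array. A sketch of that sequence (using generic VM_MAP where the original uses VM_DMA_COHERENT):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_contiguous_remap(struct page *page, size_t size, pgprot_t prot)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);	/* successive pages of the region */
	vaddr = vmap(pages, count, VM_MAP, prot);
	kfree(pages);	/* safe: vmap() installed the PTEs; the array is no longer needed */
	return vaddr;
}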
/linux/drivers/gpu/drm/xen/
xen_drm_front_gem.c
30 struct page **pages; member
56 kvfree(xen_obj->pages); in gem_free_pages_array()
57 xen_obj->pages = NULL; in gem_free_pages_array()
136 if (IS_ERR(xen_obj->pages)) { in gem_create()
138 xen_obj->pages = NULL; in gem_create()
169 if (xen_obj->pages) { in xen_drm_front_gem_free_object_unlocked()
172 xen_obj->pages); in xen_drm_front_gem_free_object_unlocked()
188 return xen_obj->pages; in xen_drm_front_gem_get_pages()
195 if (!xen_obj->pages) in xen_drm_front_gem_get_sg_table()
231 xen_obj->pages); in xen_drm_front_gem_import_sg_table()
[all …]
/linux/drivers/staging/media/ipu3/
ipu3-dmamap.c
26 __free_page(pages[count]); in imgu_dmamap_free_buffer()
27 kvfree(pages); in imgu_dmamap_free_buffer()
36 struct page **pages; in imgu_dmamap_alloc_buffer() local
42 pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL); in imgu_dmamap_alloc_buffer()
44 if (!pages) in imgu_dmamap_alloc_buffer()
77 pages[i++] = page++; in imgu_dmamap_alloc_buffer()
80 return pages; in imgu_dmamap_alloc_buffer()
100 struct page **pages; in imgu_dmamap_alloc() local
113 if (!pages) in imgu_dmamap_alloc()
131 map->pages = pages; in imgu_dmamap_alloc()
[all …]
/linux/Documentation/admin-guide/mm/
hugetlbpage.rst
32 and surplus huge pages in the pool of huge pages of default size.
48 is the size of the pool of huge pages.
71 pages of all sizes.
82 pages in the kernel's huge page pool. "Persistent" huge pages will be
173 default sized persistent huge pages::
207 huge pages can grow, if more huge pages than ``/proc/sys/vm/nr_hugepages`` are
215 surplus pages will first be promoted to persistent huge pages. Then, additional
228 of the in-use huge pages to surplus huge pages. This will occur even if
249 pages may exist::
272 pages size are allowed.
[all …]
/linux/fs/ramfs/
file-nommu.c
65 struct page *pages; in ramfs_nommu_expand_for_mapping() local
84 pages = alloc_pages(gfp, order); in ramfs_nommu_expand_for_mapping()
85 if (!pages) in ramfs_nommu_expand_for_mapping()
92 split_page(pages, order); in ramfs_nommu_expand_for_mapping()
96 __free_page(pages + loop); in ramfs_nommu_expand_for_mapping()
100 data = page_address(pages); in ramfs_nommu_expand_for_mapping()
124 __free_page(pages + loop++); in ramfs_nommu_expand_for_mapping()
225 if (!pages) in ramfs_nommu_get_unmapped_area()
233 ptr = pages; in ramfs_nommu_get_unmapped_area()
244 ptr = pages; in ramfs_nommu_get_unmapped_area()
[all …]
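
ramfs_nommu_expand_for_mapping() is the classic allocate-high-order-then-split idiom: grab one order-N block, split_page() it into 2^N independent order-0 pages, and return the unneeded tail. A hedged sketch of just that sequence:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *demo_alloc_exact_pages(unsigned long npages)
{
	unsigned int order = get_order(npages << PAGE_SHIFT);
	struct page *pages;
	unsigned long loop;

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		return NULL;

	/* one order-N allocation becomes 2^N independently freeable pages */
	split_page(pages, order);

	/* give back the tail pages beyond what was asked for */
	for (loop = npages; loop < (1UL << order); loop++)
		__free_page(pages + loop);

	return pages;
}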
/linux/drivers/staging/media/atomisp/pci/hmm/
hmm_reserved_pool.c
53 page_obj[i].page = repool_info->pages[j]; in get_pages_from_reserved_pool()
97 if (unlikely(!pool_info->pages)) { in hmm_reserved_pool_setup()
120 struct page *pages; in hmm_reserved_pool_init() local
146 pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order); in hmm_reserved_pool_init()
147 if (unlikely(!pages)) { in hmm_reserved_pool_init()
163 ret = set_pages_uc(pages, blk_pgnr); in hmm_reserved_pool_init()
167 __free_pages(pages, order); in hmm_reserved_pool_init()
172 repool_info->pages[i++] = pages + j; in hmm_reserved_pool_init()
216 ret = set_pages_wb(repool_info->pages[i], 1); in hmm_reserved_pool_exit()
228 __free_pages(repool_info->pages[i], 0); in hmm_reserved_pool_exit()
[all …]
/linux/net/rds/
info.c
65 struct page **pages; member
122 iter->addr = kmap_atomic(*iter->pages); in rds_info_copy()
127 "bytes %lu\n", *iter->pages, iter->addr, in rds_info_copy()
140 iter->pages++; in rds_info_copy()
166 struct page **pages = NULL; in rds_info_getsockopt() local
191 if (!pages) { in rds_info_getsockopt()
195 ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); in rds_info_getsockopt()
214 iter.pages = pages; in rds_info_getsockopt()
237 if (pages) in rds_info_getsockopt()
238 unpin_user_pages(pages, nr_pages); in rds_info_getsockopt()
[all …]
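
rds_info_getsockopt() uses the pin_user_pages_fast()/unpin_user_pages() pairing rather than plain GUP; pages pinned this way must not be released with put_page(). A sketch of the pairing (the copy step is elided and the names are illustrative):

#include <linux/mm.h>
#include <linux/slab.h>

static int demo_pin_and_fill(unsigned long start, int nr_pages)
{
	struct page **pages;
	int ret;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (ret != nr_pages) {
		if (ret >= 0)
			unpin_user_pages(pages, ret);	/* release a partial pin */
		kfree(pages);
		return ret < 0 ? ret : -EFAULT;
	}

	/* ... map each page and copy results out, as rds_info_copy() does ... */

	unpin_user_pages(pages, nr_pages);	/* never put_page() on pinned pages */
	kfree(pages);
	return 0;
}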
/linux/fs/squashfs/
page_actor.c
30 if (actor->next_page == actor->pages) in cache_next_page()
42 int pages, int length) in squashfs_page_actor_init() argument
49 actor->length = length ? : pages * PAGE_SIZE; in squashfs_page_actor_init()
51 actor->pages = pages; in squashfs_page_actor_init()
71 return actor->pageaddr = actor->next_page == actor->pages ? NULL : in direct_next_page()
82 int pages, int length) in squashfs_page_actor_init_special() argument
89 actor->length = length ? : pages * PAGE_SIZE; in squashfs_page_actor_init_special()
91 actor->pages = pages; in squashfs_page_actor_init_special()
file_direct.c
22 int pages, struct page **page, int bytes);
36 int i, n, pages, missing_pages, bytes, res = -ENOMEM; in squashfs_readpage_block() local
44 pages = end_index - start_index + 1; in squashfs_readpage_block()
46 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); in squashfs_readpage_block()
54 actor = squashfs_page_actor_init_special(page, pages, 0); in squashfs_readpage_block()
84 res = squashfs_read_cache(target_page, block, bsize, pages, in squashfs_readpage_block()
105 pageaddr = kmap_atomic(page[pages - 1]); in squashfs_readpage_block()
111 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
128 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
145 int pages, struct page **page, int bytes) in squashfs_read_cache() argument
[all …]
/linux/drivers/gpu/drm/i915/gem/
i915_gem_pages.c
14 struct sg_table *pages, in __i915_gem_object_set_pages() argument
31 drm_clflush_sg(pages); in __i915_gem_object_set_pages()
40 obj->mm.pages = pages; in __i915_gem_object_set_pages()
204 pages = fetch_and_zero(&obj->mm.pages); in __i915_gem_object_unset_pages()
206 return pages; in __i915_gem_object_unset_pages()
221 return pages; in __i915_gem_object_unset_pages()
298 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); in i915_gem_object_map_page()
299 if (!pages) in i915_gem_object_map_page()
305 pages[i++] = page; in i915_gem_object_map_page()
307 if (pages != stack) in i915_gem_object_map_page()
[all …]
i915_gem_phys.c
95 struct sg_table *pages) in i915_gem_object_put_pages_phys() argument
97 dma_addr_t dma = sg_dma_address(pages->sgl); in i915_gem_object_put_pages_phys()
98 void *vaddr = sg_page(pages->sgl); in i915_gem_object_put_pages_phys()
100 __i915_gem_object_release_shmem(obj, pages, false); in i915_gem_object_put_pages_phys()
130 sg_free_table(pages); in i915_gem_object_put_pages_phys()
131 kfree(pages); in i915_gem_object_put_pages_phys()
190 struct sg_table *pages; in i915_gem_object_shmem_to_phys() local
193 pages = __i915_gem_object_unset_pages(obj); in i915_gem_object_shmem_to_phys()
202 if (!IS_ERR_OR_NULL(pages)) in i915_gem_object_shmem_to_phys()
203 i915_gem_object_put_pages_shmem(obj, pages); in i915_gem_object_shmem_to_phys()
[all …]
/linux/Documentation/vm/
unevictable-lru.rst
15 pages.
30 pages and to hide these pages from vmscan. This mechanism is based on a patch
69 (1) We get to "treat unevictable pages just like we treat other pages in the
84 swap-backed pages. This differentiation is only important while the pages are,
192 "cull" such pages that it encounters: that is, it diverts those pages to the
231 prevented the management of the pages on an LRU list, and thus mlocked pages
245 mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable
301 get_user_pages() will be unable to fault in the pages. That's okay. If pages
348 such as the VDSO page, relay channel pages, etc. These pages
378 these pages for munlocking. Because we don't want to leave pages mlocked,
[all …]
/linux/arch/arm/plat-omap/
sram.c
65 int pages; in omap_sram_push() local
73 pages = PAGE_ALIGN(size) / PAGE_SIZE; in omap_sram_push()
75 set_memory_rw(base, pages); in omap_sram_push()
79 set_memory_ro(base, pages); in omap_sram_push()
80 set_memory_x(base, pages); in omap_sram_push()
101 int pages; in omap_map_sram() local
125 pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE; in omap_map_sram()
127 set_memory_ro(base, pages); in omap_map_sram()
128 set_memory_x(base, pages); in omap_map_sram()
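
The sram.c hits show the page-count arithmetic that precedes permission changes: PAGE_ALIGN(size) / PAGE_SIZE pages are flipped writable for the copy, then sealed read-only and executable. A sketch of the sequence (memcpy stands in for the original's code-copying step, which it does differently):

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/string.h>

static void demo_push_and_seal(unsigned long base, const void *src, size_t size)
{
	int pages = PAGE_ALIGN(size) / PAGE_SIZE;

	set_memory_rw(base, pages);	/* open the window for writing */
	memcpy((void *)base, src, size);
	set_memory_ro(base, pages);	/* seal: read-only ... */
	set_memory_x(base, pages);	/* ... but executable */
}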
/linux/drivers/media/common/videobuf2/
frame_vector.c
107 struct page **pages; in put_vaddr_frames() local
111 pages = frame_vector_pages(vec); in put_vaddr_frames()
117 if (WARN_ON(IS_ERR(pages))) in put_vaddr_frames()
120 unpin_user_pages(pages, vec->nr_frames); in put_vaddr_frames()
139 struct page **pages; in frame_vector_to_pages() local
147 pages = (struct page **)nums; in frame_vector_to_pages()
149 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
165 struct page **pages; in frame_vector_to_pfns() local
169 pages = (struct page **)(vec->ptrs); in frame_vector_to_pfns()
170 nums = (unsigned long *)pages; in frame_vector_to_pfns()
[all …]
/linux/drivers/gpu/drm/
drm_scatter.c
51 for (i = 0; i < entry->pages; i++) { in drm_sg_cleanup()
83 unsigned long pages, i, j; in drm_legacy_sg_alloc() local
103 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_legacy_sg_alloc()
104 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); in drm_legacy_sg_alloc()
106 entry->pages = pages; in drm_legacy_sg_alloc()
107 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); in drm_legacy_sg_alloc()
113 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); in drm_legacy_sg_alloc()
120 entry->virtual = vmalloc_32(pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
131 memset(entry->virtual, 0, pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
138 for (i = (unsigned long)entry->virtual, j = 0; j < pages; in drm_legacy_sg_alloc()
[all …]
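
drm_legacy_sg_alloc() sizes everything from one rounding step, (size + PAGE_SIZE - 1) / PAGE_SIZE, then allocates its bookkeeping arrays plus a 32-bit-addressable backing buffer. A sketch with a hypothetical entry struct:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

struct demo_sg_entry {
	unsigned long pages;
	struct page **pagelist;
	void *virtual;
};

static int demo_sg_alloc(struct demo_sg_entry *entry, unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;	/* round up */

	entry->pages = pages;
	entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
	if (!entry->pagelist)
		return -ENOMEM;

	entry->virtual = vmalloc_32(pages << PAGE_SHIFT);	/* addressable below 4 GiB */
	if (!entry->virtual) {
		kfree(entry->pagelist);
		return -ENOMEM;
	}
	memset(entry->virtual, 0, pages << PAGE_SHIFT);
	return 0;
}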
/linux/drivers/block/xen-blkback/
blkback.c
252 unmap_data.pages = pages; in free_persistent_gnts()
294 unmap_data.pages = pages; in xen_blkbk_unmap_purged_grants()
647 struct grant_page **pages, in xen_blkbk_unmap_prepare() argument
717 work->pages = req->unmap_pages; in xen_blkbk_unmap_and_respond()
751 pages += batch; in xen_blkbk_unmap()
757 struct grant_page *pages[], in xen_blkbk_map() argument
785 pages[i]->gref); in xen_blkbk_map()
797 &pages[i]->page)) { in xen_blkbk_map()
804 addr = vaddr(pages[i]->page); in xen_blkbk_map()
811 flags, pages[i]->gref, in xen_blkbk_map()
[all …]
/linux/drivers/net/ethernet/amd/xgbe/
xgbe-desc.c
141 if (ring->rx_hdr_pa.pages) { in xgbe_free_ring()
152 if (ring->rx_buf_pa.pages) { in xgbe_free_ring()
289 struct page *pages = NULL; in xgbe_alloc_pages() local
301 if (pages) in xgbe_alloc_pages()
313 if (!pages) in xgbe_alloc_pages()
320 put_page(pages); in xgbe_alloc_pages()
324 pa->pages = pages; in xgbe_alloc_pages()
336 get_page(pa->pages); in xgbe_set_buffer_data()
349 pa->pages = NULL; in xgbe_set_buffer_data()
490 if (rdata->rx.hdr.pa.pages) in xgbe_unmap_rdata()
[all …]
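
Finally, the xgbe hits show reference-counted page sharing: the ring keeps one reference to an allocated page, and every buffer sliced from it takes another with get_page(), so the page survives until the last user calls put_page(). A bare sketch with a hypothetical holder struct:

#include <linux/mm.h>

struct demo_page_holder {
	struct page *pages;
};

static void demo_attach_slice(struct demo_page_holder *pa)
{
	get_page(pa->pages);	/* one extra reference per in-flight buffer */
}

static void demo_drop_owner_ref(struct demo_page_holder *pa)
{
	put_page(pa->pages);	/* the ring's own reference; slices hold theirs */
	pa->pages = NULL;
}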
