/linux/include/linux/

gfp.h
  614  alloc_pages(gfp_mask, order)
  616  alloc_pages(gfp_mask, order)
  618  #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
  619  #define alloc_page_vma(gfp_mask, vma, addr) \
  629  #define __get_free_page(gfp_mask) \
  630  __get_free_pages((gfp_mask), 0)
  632  #define __get_dma_pages(gfp_mask, order) \
  641  unsigned int fragsz, gfp_t gfp_mask,
  645  unsigned int fragsz, gfp_t gfp_mask)  in page_frag_alloc()
  672  bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
  [all …]

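The lines above are the core page-allocation entry points. A minimal usage sketch (function name hypothetical; GFP_KERNEL assumes a context that is allowed to sleep):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    static int example_page_alloc(void)
    {
        struct page *page;
        unsigned long buf;

        /* One zeroed page; GFP_KERNEL may sleep and enter reclaim. */
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
            return -ENOMEM;

        /* Four contiguous pages (order 2), returned as a kernel virtual address. */
        buf = __get_free_pages(GFP_KERNEL, 2);
        if (!buf) {
            __free_page(page);
            return -ENOMEM;
        }

        free_pages(buf, 2);
        __free_page(page);
        return 0;
    }
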
cpuset.h
  83   extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
  85   static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)  in cpuset_node_allowed()
  88   return __cpuset_node_allowed(node, gfp_mask);  in cpuset_node_allowed()
  92   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed()
  94   return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);  in __cpuset_zone_allowed()
  97   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed()
  100  return __cpuset_zone_allowed(z, gfp_mask);  in cpuset_zone_allowed()
  226  static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)  in cpuset_node_allowed()
  231  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed()
  236  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed()

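These helpers let the page allocator skip zones on nodes the current task's cpuset forbids (the matches at lines 226-236 are the no-op stubs for !CONFIG_CPUSETS). A simplified sketch of that check, not the actual get_page_from_freelist() logic:

    #include <linux/cpuset.h>
    #include <linux/mmzone.h>
    #include <linux/gfp.h>

    /* Hypothetical helper: may this zone be used for a hardwalled allocation? */
    static bool zone_allowed_by_cpuset(struct zone *zone, gfp_t gfp_mask)
    {
        if (cpusets_enabled() && (gfp_mask & __GFP_HARDWALL) &&
            !__cpuset_zone_allowed(zone, gfp_mask))
            return false;
        return true;
    }
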
mempool.h
  13   typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
  36   gfp_t gfp_mask, int node_id);
  44   gfp_t gfp_mask, int nid);
  48   extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
  56   void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
  77   void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
  96   void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);

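mempool_alloc() makes forward progress whenever gfp_mask allows direct reclaim: it falls back to the preallocated reserve and sleeps until an element is returned. A minimal sketch of the common kmalloc-backed pool (names and sizes hypothetical):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    #define MY_MIN_NR   4      /* elements kept in reserve */
    #define MY_ELEM_SZ  256    /* hypothetical element size */

    static mempool_t *my_pool;

    static int my_pool_setup(void)
    {
        /* Convenience wrapper around mempool_kmalloc()/mempool_kfree(). */
        my_pool = mempool_create_kmalloc_pool(MY_MIN_NR, MY_ELEM_SZ);
        return my_pool ? 0 : -ENOMEM;
    }

    static void my_pool_use(void)
    {
        /* GFP_NOIO still allows reclaim, so this will not fail; it may sleep. */
        void *elem = mempool_alloc(my_pool, GFP_NOIO);

        /* ... use elem in an I/O path ... */

        mempool_free(elem, my_pool);
    }
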
blk-crypto.h
  86   gfp_t gfp_mask);
  115  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
  128  gfp_t gfp_mask)  in bio_crypt_clone()
  131  return __bio_crypt_clone(dst, src, gfp_mask);  in bio_crypt_clone()

page_owner.h
  13   unsigned short order, gfp_t gfp_mask);
  28   unsigned short order, gfp_t gfp_mask)  in set_page_owner()
  31   __set_page_owner(page, order, gfp_mask);  in set_page_owner()
  59   unsigned int order, gfp_t gfp_mask)  in set_page_owner()

swap.h
  386  gfp_t gfp_mask, nodemask_t *mask);
  390  gfp_t gfp_mask,
  393  gfp_t gfp_mask, bool noswap,
  581  static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)  in add_swap_count_continuation()
  604  gfp_t gfp_mask, struct vm_fault *vmf)  in swap_cluster_readahead()
  609  static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,  in swapin_readahead()
  644  gfp_t gfp_mask, void **shadowp)  in add_to_swap_cache()
  730  extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
  731  static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)  in cgroup_throttle_swaprate()
  735  __cgroup_throttle_swaprate(page, gfp_mask);  in cgroup_throttle_swaprate()
  [all …]

/linux/block/

blk-lib.c
  27   sector_t nr_sects, gfp_t gfp_mask, int flags,  in __blkdev_issue_discard()
  98   bio = blk_next_bio(bio, 0, gfp_mask);  in __blkdev_issue_discard()
  192  bio = blk_next_bio(bio, 1, gfp_mask);  in __blkdev_issue_write_same()
  228  sector_t nr_sects, gfp_t gfp_mask,  in blkdev_issue_write_same()
  248  sector_t sector, sector_t nr_sects, gfp_t gfp_mask,  in __blkdev_issue_write_zeroes()
  268  bio = blk_next_bio(bio, 0, gfp_mask);  in __blkdev_issue_write_zeroes()
  304  sector_t sector, sector_t nr_sects, gfp_t gfp_mask,  in __blkdev_issue_zero_pages()
  320  gfp_mask);  in __blkdev_issue_zero_pages()
  394  sector_t nr_sects, gfp_t gfp_mask, unsigned flags)  in blkdev_issue_zeroout()
  411  gfp_mask, &bio, flags);  in blkdev_issue_zeroout()
  [all …]

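The exported helpers here (blkdev_issue_discard(), blkdev_issue_zeroout(), ...) build and submit the bios synchronously; gfp_mask governs the bio and page allocations along the way. A hedged sketch against the signatures shown in this tree (caller and range are hypothetical, and this corner of the block API has changed in later kernels):

    #include <linux/blkdev.h>

    /* Zero 1 MiB starting at sector 2048 on an already-opened block device. */
    static int zero_one_megabyte(struct block_device *bdev)
    {
        sector_t start = 2048;
        sector_t nr_sects = (1024 * 1024) >> 9;   /* bytes -> 512-byte sectors */

        /* flags == 0: fall back to writing zero pages if the device has no
         * efficient write-zeroes support. May sleep with GFP_KERNEL. */
        return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL, 0);
    }
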
blk-map.c
  22   gfp_t gfp_mask)  in bio_alloc_map_data()
  130  struct iov_iter *iter, gfp_t gfp_mask)  in bio_copy_user_iov()
  140  bmd = bio_alloc_map_data(iter, gfp_mask);  in bio_copy_user_iov()
  155  bio = bio_kmalloc(gfp_mask, nr_pages);  in bio_copy_user_iov()
  183  page = alloc_page(GFP_NOIO | gfp_mask);  in bio_copy_user_iov()
  234  gfp_t gfp_mask)  in bio_map_user_iov()
  340  unsigned int len, gfp_t gfp_mask)  in bio_map_kern()
  351  bio = bio_kmalloc(gfp_mask, nr_pages);  in bio_map_kern()
  438  bio = bio_kmalloc(gfp_mask, nr_pages);  in bio_copy_kern()
  567  unsigned long len, gfp_t gfp_mask)  in blk_rq_map_user()
  [all …]

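These are the mapping helpers behind passthrough requests; blk_rq_map_kern() and blk_rq_map_user() take the gfp_mask used for the bios and any bounce pages. A rough sketch of mapping a kernel buffer onto a driver-private request (hypothetical caller; request allocation details vary between kernel versions):

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>
    #include <linux/err.h>

    static int map_kernel_buffer(struct request_queue *q, void *buf, unsigned int len)
    {
        struct request *rq;
        int ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
            return PTR_ERR(rq);

        /* Attach buf to the request; GFP_KERNEL may sleep while copying/bouncing. */
        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);

        /* ... a real driver would execute the request here ... */

        blk_mq_free_request(rq);
        return ret;
    }
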
blk-crypto.c
  82   const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)  in bio_crypt_set_ctx()
  90   WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));  in bio_crypt_set_ctx()
  92   bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);  in bio_crypt_set_ctx()
  106  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)  in __bio_crypt_clone()
  108  dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);  in __bio_crypt_clone()
  297  gfp_t gfp_mask)  in __blk_crypto_rq_bio_prep()
  300  rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);  in __blk_crypto_rq_bio_prep()

/linux/mm/

mempool.c
  181  gfp_t gfp_mask, int node_id)  in mempool_init_node()
  191  gfp_mask, node_id);  in mempool_init_node()
  262  gfp_t gfp_mask, int node_id)  in mempool_create_node()
  271  gfp_mask, node_id)) {  in mempool_create_node()
  381  VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);  in mempool_alloc()
  414  if (gfp_temp != gfp_mask) {  in mempool_alloc()
  416  gfp_temp = gfp_mask;  in mempool_alloc()
  421  if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {  in mempool_alloc()
  512  return kmem_cache_alloc(mem, gfp_mask);  in mempool_alloc_slab()
  530  return kmalloc(size, gfp_mask);  in mempool_kmalloc()
  [all …]

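Line 381 shows that mempool_alloc() rejects __GFP_ZERO, and line 421 that a gfp_mask without __GFP_DIRECT_RECLAIM fails fast instead of waiting for an element to come back. A sketch of the embedded-pool variant backed by a slab cache (struct and names hypothetical):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct foo {
        int x;
    };

    static struct kmem_cache *foo_cache;   /* hypothetical cache */
    static mempool_t foo_pool;             /* embedded pool, initialised in place */

    static int foo_pool_setup(void)
    {
        foo_cache = KMEM_CACHE(foo, 0);
        if (!foo_cache)
            return -ENOMEM;

        /* Keep at least 2 objects in reserve to guarantee forward progress. */
        return mempool_init(&foo_pool, 2, mempool_alloc_slab,
                            mempool_free_slab, foo_cache);
    }

    static void foo_pool_teardown(void)
    {
        mempool_exit(&foo_pool);
        kmem_cache_destroy(foo_cache);
    }
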
page_owner.c
  26   gfp_t gfp_mask;
  152  unsigned short order, gfp_t gfp_mask)  in __set_page_owner_handle()
  161  page_owner->gfp_mask = gfp_mask;  in __set_page_owner_handle()
  173  gfp_t gfp_mask)  in __set_page_owner()
  181  handle = save_stack(gfp_mask);  in __set_page_owner()
  225  new_page_owner->gfp_mask = old_page_owner->gfp_mask;  in __folio_copy_owner()
  342  &page_owner->gfp_mask, page_owner->pid,  in print_page_owner()
  394  gfp_t gfp_mask;  in __dump_page_owner()
  403  gfp_mask = page_owner->gfp_mask;  in __dump_page_owner()
  404  mt = gfp_migratetype(gfp_mask);  in __dump_page_owner()
  [all …]

page_alloc.c
  4105  gfp_mask)) {  in get_page_from_freelist()
  4214  current->comm, &vaf, gfp_mask, &gfp_mask,  in warn_alloc()
  4252  .gfp_mask = gfp_mask,  in __alloc_pages_may_oom()
  4545  gfp_mask = current_gfp_context(gfp_mask);  in fs_reclaim_acquire()
  4548  if (gfp_mask & __GFP_FS)  in fs_reclaim_acquire()
  4562  gfp_mask = current_gfp_context(gfp_mask);  in fs_reclaim_release()
  5483  gfp_t gfp_mask)  in __page_frag_cache_refill()
  5486  gfp_t gfp = gfp_mask;  in __page_frag_cache_refill()
  9050  .gfp_mask = current_gfp_context(gfp_mask),  in alloc_contig_range()
  9173  gfp_mask);  in __alloc_contig_pages()
  [all …]

vmscan.c
  138   gfp_t gfp_mask;
  824   .gfp_mask = gfp_mask,  in shrink_slab_memcg()
  928   .gfp_mask = gfp_mask,  in shrink_slab()
  1969  .gfp_mask = GFP_KERNEL,  in reclaim_clean_pages_from_list()
  2601  .gfp_mask = GFP_KERNEL,  in reclaim_pages()
  3794  .gfp_mask = current_gfp_context(gfp_mask),  in try_to_free_pages()
  3851  sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |  in mem_cgroup_shrink_node()
  3875  gfp_t gfp_mask,  in try_to_free_mem_cgroup_pages()
  3882  .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |  in try_to_free_mem_cgroup_pages()
  4749  .gfp_mask = current_gfp_context(gfp_mask),  in __node_reclaim()
  [all …]

swap_state.c
  459  page = alloc_page_vma(gfp_mask, vma, addr);  in __read_swap_cache_async()
  491  if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))  in __read_swap_cache_async()
  521  struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,  in read_swap_cache_async()
  525  struct page *retpage = __read_swap_cache_async(entry, gfp_mask,  in read_swap_cache_async()
  645  gfp_mask, vma, addr, &page_allocated);  in swap_cluster_readahead()
  661  return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);  in swap_cluster_readahead()
  817  page = __read_swap_cache_async(entry, gfp_mask, vma,  in swap_vma_readahead()
  833  return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,  in swap_vma_readahead()
  849  struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,  in swapin_readahead()
  853  swap_vma_readahead(entry, gfp_mask, vmf) :  in swapin_readahead()
  [all …]

/linux/fs/nfs/blocklayout/

dev.c
  236  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_simple()
  242  dev = bl_resolve_deviceid(server, v, gfp_mask);  in bl_parse_simple()
  431  volumes, v->concat.volumes[i], gfp_mask);  in bl_parse_concat()
  460  volumes, v->stripe.volumes[i], gfp_mask);  in bl_parse_stripe()
  480  return bl_parse_simple(server, d, volumes, idx, gfp_mask);  in bl_parse_deviceid()
  482  return bl_parse_slice(server, d, volumes, idx, gfp_mask);  in bl_parse_deviceid()
  488  return bl_parse_scsi(server, d, volumes, idx, gfp_mask);  in bl_parse_deviceid()
  497  gfp_t gfp_mask)  in bl_alloc_deviceid_node()
  508  scratch = alloc_page(gfp_mask);  in bl_alloc_deviceid_node()
  521  gfp_mask);  in bl_alloc_deviceid_node()
  [all …]

/linux/fs/btrfs/

ulist.h
  48   struct ulist *ulist_alloc(gfp_t gfp_mask);
  50   int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
  52   u64 *old_aux, gfp_t gfp_mask);
  57   void **old_aux, gfp_t gfp_mask)  in ulist_add_merge_ptr()
  61   int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);  in ulist_add_merge_ptr()
  65   return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);  in ulist_add_merge_ptr()

ulist.c
  92   struct ulist *ulist_alloc(gfp_t gfp_mask)  in ulist_alloc()
  94   struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);  in ulist_alloc()
  186  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)  in ulist_add()
  188  return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);  in ulist_add()
  192  u64 *old_aux, gfp_t gfp_mask)  in ulist_add_merge()
  203  node = kmalloc(sizeof(*node), gfp_mask);  in ulist_add_merge()

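btrfs uses a ulist as a small set of u64 values (for example while walking backrefs); the gfp_mask is forwarded to the kmalloc() of the list head and of each node. A sketch of the typical pattern (values and caller hypothetical; GFP_NOFS because these paths usually hold filesystem locks):

    #include "ulist.h"    /* btrfs-internal header */

    static int walk_roots_example(void)
    {
        struct ulist *roots;
        struct ulist_iterator uiter;
        struct ulist_node *node;
        int ret;

        roots = ulist_alloc(GFP_NOFS);
        if (!roots)
            return -ENOMEM;

        ret = ulist_add(roots, 257, 0, GFP_NOFS);   /* val 257, no aux data */
        if (ret < 0)
            goto out;

        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(roots, &uiter)))
            pr_info("root %llu\n", node->val);
        ret = 0;
    out:
        ulist_free(roots);
        return ret;
    }
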
/linux/include/linux/sched/

mm.h
  208  extern void fs_reclaim_acquire(gfp_t gfp_mask);
  209  extern void fs_reclaim_release(gfp_t gfp_mask);
  213  static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }  in fs_reclaim_acquire()
  214  static inline void fs_reclaim_release(gfp_t gfp_mask) { }  in fs_reclaim_release()
  225  static inline void might_alloc(gfp_t gfp_mask)  in might_alloc()
  227  fs_reclaim_acquire(gfp_mask);  in might_alloc()
  228  fs_reclaim_release(gfp_mask);  in might_alloc()
  230  might_sleep_if(gfpflags_allow_blocking(gfp_mask));  in might_alloc()

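might_alloc() combines the fs_reclaim lockdep annotation with might_sleep_if(), so passing a sleepable gfp_mask from atomic context is flagged deterministically, even when the allocator would have taken a fast path. A sketch of where an allocation wrapper might call it (helper name hypothetical):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    /* Hypothetical wrapper with a lockless fast path: call might_alloc()
     * first so misuse in atomic context is caught even on cache hits. */
    static void *my_cache_alloc(gfp_t gfp_mask)
    {
        might_alloc(gfp_mask);

        /* ... try a per-CPU freelist here ... */

        return kmalloc(64, gfp_mask);   /* slow path */
    }
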
/linux/lib/

generic-radix-tree.c
  79   static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)  in genradix_alloc_node()
  83   node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);  in genradix_alloc_node()
  90   kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);  in genradix_alloc_node()
  105  gfp_t gfp_mask)  in __genradix_ptr_alloc()
  122  new_node = genradix_alloc_node(gfp_mask);  in __genradix_ptr_alloc()
  145  new_node = genradix_alloc_node(gfp_mask);  in __genradix_ptr_alloc()
  218  gfp_t gfp_mask)  in __genradix_prealloc()
  223  if (!__genradix_ptr_alloc(radix, offset, gfp_mask))  in __genradix_prealloc()

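__genradix_ptr_alloc() is the slow path behind the genradix_ptr_alloc() macro from <linux/generic-radix-tree.h>; the gfp_mask is used for the zeroed node pages. Typical usage, loosely following the header's documented pattern (struct and names hypothetical):

    #include <linux/generic-radix-tree.h>

    struct foo {
        int counter;
    };

    static GENRADIX(struct foo) foo_table;

    static int foo_table_example(void)
    {
        struct foo *p;

        genradix_init(&foo_table);

        /* Allocates intermediate nodes with GFP_KERNEL as needed. */
        p = genradix_ptr_alloc(&foo_table, 123, GFP_KERNEL);
        if (!p)
            return -ENOMEM;
        p->counter = 1;

        genradix_free(&foo_table);
        return 0;
    }
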
scatterlist.c
  161  void *ptr = (void *) __get_free_page(gfp_mask);  in sg_kmalloc()
  162  kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);  in sg_kmalloc()
  166  gfp_mask);  in sg_kmalloc()
  318  sg = alloc_fn(alloc_size, gfp_mask);  in __sg_alloc_table()
  376  NULL, 0, gfp_mask, sg_kmalloc);  in sg_alloc_table()
  386  gfp_t gfp_mask)  in get_next_sg()
  399  new_sg = sg_kmalloc(alloc_size, gfp_mask);  in get_next_sg()
  445  unsigned int left_pages, gfp_t gfp_mask)  in sg_alloc_append_table_from_pages()
  514  gfp_mask);  in sg_alloc_append_table_from_pages()
  568  gfp_t gfp_mask)  in sg_alloc_table_from_pages_segment()
  [all …]

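Callers only see the gfp_mask they pass to sg_alloc_table() and friends; internally it is forwarded to sg_kmalloc() for each chunk of scatterlist entries. A minimal sketch (function name and the page array are hypothetical):

    #include <linux/scatterlist.h>

    static int build_sg_table_example(struct page **pages, unsigned int npages)
    {
        struct sg_table table;
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        ret = sg_alloc_table(&table, npages, GFP_KERNEL);
        if (ret)
            return ret;

        for_each_sg(table.sgl, sg, npages, i)
            sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* ... map with dma_map_sgtable() and perform I/O here ... */

        sg_free_table(&table);
        return 0;
    }
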
/linux/net/sunrpc/auth_gss/

gss_krb5_mech.c
  312  context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)  in context_derive_keys_des3()
  339  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_des3()
  357  context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)  in context_derive_keys_new()
  373  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_new()
  469  gfp_t gfp_mask)  in gss_import_v2_context()
  522  gss_kerberos_mech.gm_oid.len, gfp_mask);  in gss_import_v2_context()
  531  return context_derive_keys_des3(ctx, gfp_mask);  in gss_import_v2_context()
  534  return context_derive_keys_new(ctx, gfp_mask);  in gss_import_v2_context()
  547  gfp_t gfp_mask)  in gss_import_sec_context_kerberos()
  553  ctx = kzalloc(sizeof(*ctx), gfp_mask);  in gss_import_sec_context_kerberos()
  [all …]

/linux/drivers/net/ethernet/mellanox/mlx4/

icm.c
  99   gfp_t gfp_mask, int node)  in mlx4_alloc_icm_pages()
  103  page = alloc_pages_node(node, gfp_mask, order);  in mlx4_alloc_icm_pages()
  105  page = alloc_pages(gfp_mask, order);  in mlx4_alloc_icm_pages()
  115  int order, gfp_t gfp_mask)  in mlx4_alloc_icm_coherent()
  118  &buf->dma_addr, gfp_mask);  in mlx4_alloc_icm_coherent()
  133  gfp_t gfp_mask, int coherent)  in mlx4_alloc_icm()
  142  BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));  in mlx4_alloc_icm()
  145  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),  in mlx4_alloc_icm()
  162  gfp_mask & ~(__GFP_HIGHMEM |  in mlx4_alloc_icm()
  167  gfp_mask & ~(__GFP_HIGHMEM |  in mlx4_alloc_icm()
  [all …]

/linux/drivers/connector/

connector.c
  62   gfp_t gfp_mask)  in cn_netlink_send_mult()
  96   skb = nlmsg_new(size, gfp_mask);  in cn_netlink_send_mult()
  114  gfp_mask);  in cn_netlink_send_mult()
  116  !gfpflags_allow_blocking(gfp_mask));  in cn_netlink_send_mult()
  122  gfp_t gfp_mask)  in cn_netlink_send()
  124  return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);  in cn_netlink_send()

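cn_netlink_send() broadcasts a connector message to a netlink group; the gfp_mask matters because callers frequently run in contexts that cannot sleep and must pass GFP_ATOMIC. A sketch with hypothetical cb_id values and group number:

    #include <linux/connector.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int notify_userspace(const char *text)
    {
        struct cn_msg *msg;
        size_t len = strlen(text) + 1;
        int ret;

        msg = kzalloc(sizeof(*msg) + len, GFP_ATOMIC);
        if (!msg)
            return -ENOMEM;

        msg->id.idx = 0x123;    /* hypothetical connector index */
        msg->id.val = 0x456;    /* hypothetical connector value */
        msg->len = len;
        memcpy(msg->data, text, len);

        /* Broadcast to group 1; GFP_ATOMIC because we may be in softirq context. */
        ret = cn_netlink_send(msg, 0, 1, GFP_ATOMIC);
        kfree(msg);
        return ret;
    }
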
/linux/kernel/power/

snapshot.c
  193  res = (void *)get_zeroed_page(gfp_mask);  in get_image_page()
  199  res = (void *)get_zeroed_page(gfp_mask);  in get_image_page()
  208  static void *__get_safe_page(gfp_t gfp_mask)  in __get_safe_page()
  217  return get_image_page(gfp_mask, PG_SAFE);  in __get_safe_page()
  220  unsigned long get_safe_page(gfp_t gfp_mask)  in get_safe_page()
  222  return (unsigned long)__get_safe_page(gfp_mask);  in get_safe_page()
  229  page = alloc_page(gfp_mask);  in alloc_image_page()
  295  gfp_t gfp_mask; /* mask for allocating pages */
  304  ca->gfp_mask = gfp_mask;  in chain_init()
  316  get_image_page(ca->gfp_mask, PG_ANY);  in chain_alloc()
  [all …]

/linux/fs/ntfs/

malloc.h
  28   static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)  in __ntfs_malloc()
  33   return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);  in __ntfs_malloc()
  37   return __vmalloc(size, gfp_mask);  in __ntfs_malloc()

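__ntfs_malloc() is the classic "kmalloc for small requests, __vmalloc for large ones" fallback, masking off __GFP_HIGHMEM for the slab path. Newer code usually expresses the same policy with kvmalloc()/kvfree(); a sketch (function names hypothetical, GFP_KERNEL used for simplicity):

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Try the slab allocator first; fall back to vmalloc when the request
     * is large or physically contiguous memory is fragmented. */
    static void *alloc_big_buffer(unsigned long size)
    {
        return kvmalloc(size, GFP_KERNEL);
    }

    static void free_big_buffer(void *buf)
    {
        kvfree(buf);    /* correct for both kmalloc and vmalloc memory */
    }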