/linux/lib/
test_vmalloc.c
  156  p = vmalloc(n * PAGE_SIZE);  in random_size_alloc_test()
  175  ptr = vmalloc(sizeof(void *) * 15000);  in long_busy_list_alloc_test()
  180  ptr[i] = vmalloc(1 * PAGE_SIZE);  in long_busy_list_alloc_test()
  183  ptr_1 = vmalloc(100 * PAGE_SIZE);  in long_busy_list_alloc_test()
  187  ptr_2 = vmalloc(1 * PAGE_SIZE);  in long_busy_list_alloc_test()
  221  ptr = vmalloc(sizeof(void *) * junk_length);  in full_fit_alloc_test()
  232  ptr[i] = vmalloc(1 * PAGE_SIZE);  in full_fit_alloc_test()
  233  junk_ptr[i] = vmalloc(1 * PAGE_SIZE);  in full_fit_alloc_test()
  240  tmp = vmalloc(1 * PAGE_SIZE);  in full_fit_alloc_test()
  331  p = vmalloc(1 * PAGE_SIZE);  in kvfree_rcu_1_arg_vmalloc_test()
  [all …]
decompress_unxz.c
  155  #undef vmalloc
  159  #define vmalloc(size) malloc(size)  macro
Kconfig.kasan
  181  bool "Back mappings in vmalloc space with real shadow memory"
  184  By default, the shadow region for vmalloc space is the read-only
  186  vmalloc space.
  188  Enabling this option will hook in to vmap/vmalloc and back those
/linux/samples/kmemleak/
kmemleak-test.c
  57   pr_info("vmalloc(64) = %p\n", vmalloc(64));  in kmemleak_test_init()
  58   pr_info("vmalloc(64) = %p\n", vmalloc(64));  in kmemleak_test_init()
  59   pr_info("vmalloc(64) = %p\n", vmalloc(64));  in kmemleak_test_init()
  60   pr_info("vmalloc(64) = %p\n", vmalloc(64));  in kmemleak_test_init()
  61   pr_info("vmalloc(64) = %p\n", vmalloc(64));  in kmemleak_test_init()
/linux/scripts/coccinelle/api/
kfree_mismatch.cocci
  4    /// vmalloc'ed by vfree functions and kvmalloc'ed by kvfree
  31   E = \(vmalloc\|vzalloc\|vmalloc_user\|vmalloc_node\|
  44   E = \(vmalloc\|vzalloc\|vmalloc_user\|vmalloc_node\|
  71   …... when != if (...) { ... E = \(vmalloc\|vzalloc\|vmalloc_user\|vmalloc_node\|vzalloc_node\|vmall…
  85   …... when != if (...) { ... E = \(vmalloc\|vzalloc\|vmalloc_user\|vmalloc_node\|vzalloc_node\|vmall…
  97   * E = \(vmalloc\|vzalloc\|vmalloc_user\|vmalloc_node\|vzalloc_node\|
  110  E = \(vmalloc\|vzalloc\|vmalloc_user\|vmalloc_node\|vzalloc_node\|
  185  msg = "WARNING vmalloc is used to allocate this memory at line %s" % (a[0].line)
  193  msg = "WARNING vmalloc is used to allocate this memory at line %s" % (a[0].line)
  218  msg = "WARNING kmalloc (line %s) && vmalloc (line %s) are used to allocate this memory" % (ka[0].li…
  [all …]
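The rule above pairs each allocator family with its matching free routine. As a rough illustration of the pairing it enforces (not taken from the script itself; function and variable names are hypothetical):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Illustrative sketch: every allocator must be released by its own free. */
static void example_alloc_pairing(size_t n)
{
	char *small = kmalloc(n, GFP_KERNEL);   /* kmalloc()  -> kfree()  */
	char *large = vmalloc(n);               /* vmalloc()  -> vfree()  */
	char *any   = kvmalloc(n, GFP_KERNEL);  /* kvmalloc() -> kvfree() */

	/* ... use the buffers ... */

	kfree(small);   /* calling kfree(large) here is the mismatch the script reports */
	vfree(large);
	kvfree(any);
}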
kvmalloc.cocci
  3    /// Find if/else condition with kmalloc/vmalloc calls.
  41   * E = \(vmalloc\|vzalloc\|vmalloc_node\|vzalloc_node\)(..., size, ...)
  53   * E = \(vmalloc\|vzalloc\|vmalloc_node\|vzalloc_node\)(..., size, ...)
  65   * x = \(vmalloc\|vzalloc\|vmalloc_node\|vzalloc_node\)(..., size, ...)
  97   - E = vmalloc(size);
  103  - E = vmalloc(size);
  108  - E = vmalloc(size);
  113  - E = vmalloc(size);
  118  - x = vmalloc(size);
  123  - x = vmalloc(size);
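Judging by the removed vmalloc(size) lines, this patch collapses open-coded kmalloc-with-vmalloc-fallback branches into a single kvmalloc() call. A hedged before/after sketch (helper names are made up, and this is not the script's own output):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Before: the open-coded fallback pattern the script matches. */
static void *table_alloc_open_coded(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);
	return p;
}

/* After: the single-call form it suggests; release with kvfree(). */
static void *table_alloc_suggested(size_t size)
{
	return kvmalloc(size, GFP_KERNEL);
}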
/linux/Documentation/translations/zh_CN/core-api/
memory-allocation.rst
  20   family for small chunks of memory, use `vmalloc` and its derivatives to allocate large virtually contiguous areas, or
  123  For large allocations you can use vmalloc() and vzalloc(), or request pages directly from the page allocator. The memory allocated by vmalloc
  127  tries to allocate memory with kmalloc and, if that fails, retries the allocation with `vmalloc`. As for which GFP flags can be used with `kvmalloc`
  136  When the allocated memory is no longer needed it must be freed. You can use kvfree() for memory allocated with `kmalloc`, `vmalloc`
gfp_mask-from-fs-io.rst
  60   vmalloc does not support GFP_NOFS semantics because there are hardcoded GFP_KERNEL allocations deep inside the allocator, and fixing
  61   them up is far from easy. This means that calling ``vmalloc`` with GFP_NOFS/GFP_NOIO is almost
  64   In an ideal world the upper layers would already have marked the dangerous contexts, so no special care would be needed and ``vmalloc``
  66   The recommended approach is to wrap vmalloc with the scope API, together with a comment explaining the problem.
/linux/Documentation/translations/zh_CN/dev-tools/
kasan.rst
  286  small regions). For all other areas, such as the vmalloc and vmemmap spaces, a single read-only page is mapped
  293  This also creates an incompatibility with ``VMAP_STACK``: if the stack lives in vmalloc space, it will be given
  299  With ``CONFIG_KASAN_VMALLOC``, KASAN can cover vmalloc space at the cost of higher memory usage
  302  This is supported by hooking into vmalloc and vmap and allocating real shadow memory dynamically.
  304  Most mappings in vmalloc space are small and need less than a full page of shadow space. Allocating a full shadow page per mapping
  308  Instead, KASAN shares backing space across multiple mappings. When a mapping in vmalloc space uses a particular page of the shadow region,
  309  it allocates a backing page for it. This page can later be shared by other vmalloc mappings.
  313  To avoid the difficulty of swapping mappings around, KASAN expects that the part of the shadow region covering vmalloc space will not be covered by the early
/linux/scripts/coccinelle/api/alloc/
alloc_cast.cocci
  35   kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
  58   kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
  81   kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
  98   kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
zalloc-simple.cocci
  37   * x = (T)\(kmalloc(E1, ...)\|vmalloc(E1)\|dma_alloc_coherent(...,E1,...)\|
  64   - x = vmalloc(E1);
  67   - x = (T *)vmalloc(E1);
  70   - x = (T)vmalloc(E1);
  182  x = (T)vmalloc@p(E1);
  200  msg="WARNING: vzalloc should be used for %s, instead of vmalloc/memset" % (x)
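The warning at line 200 spells out the intent: a vmalloc() followed by a memset() to zero should become a single vzalloc() call. A minimal sketch of the transformation, with a hypothetical helper name:

#include <linux/string.h>
#include <linux/vmalloc.h>

/* Before: the pattern the script flags; allocate, then clear by hand. */
static void *stream_buf_alloc(size_t len)
{
	void *buf = vmalloc(len);

	if (buf)
		memset(buf, 0, len);
	return buf;
}

/* After: the zeroing allocator does both steps in one call. */
static void *stream_buf_alloc_zeroed(size_t len)
{
	return vzalloc(len);
}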
/linux/Documentation/core-api/
gfp_mask-from-fs-io.rst
  58   vmalloc doesn't support GFP_NOFS semantic because there are hardcoded
  60   to fix up. That means that calling ``vmalloc`` with GFP_NOFS/GFP_NOIO is
  65   and so no special care is required and vmalloc should be called without
  67   layering violations then the recommended way around that is to wrap ``vmalloc``
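The scope API the document recommends is memalloc_nofs_save()/memalloc_nofs_restore() from linux/sched/mm.h: every allocation inside the marked section, including the GFP_KERNEL allocations hardcoded deep inside vmalloc, behaves as GFP_NOFS. A minimal sketch, assuming a filesystem-side caller with a hypothetical name:

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

static void *fs_metadata_buf_alloc(size_t size)
{
	unsigned int nofs_flags;
	void *buf;

	/* We hold fs locks here: no fs reclaim may recurse below this point. */
	nofs_flags = memalloc_nofs_save();
	buf = vmalloc(size);
	memalloc_nofs_restore(nofs_flags);

	return buf;
}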
memory-allocation.rst
  9    large virtually contiguous areas using `vmalloc` and its derivatives,
  154  For large allocations you can use vmalloc() and vzalloc(), or directly
  155  request pages from the page allocator. The memory allocated by `vmalloc`
  161  will be retried with `vmalloc`. There are restrictions on which GFP
  174  use kvfree() for the memory allocated with `kmalloc`, `vmalloc` and
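A short usage sketch of the kvmalloc()/kvfree() pairing described above (the function name is made up): kvmalloc() tries kmalloc() first and falls back to vmalloc() for larger requests, and kvfree() releases the result no matter which path was taken.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int build_index(size_t nr_entries)
{
	u64 *index;

	/* kmalloc first, vmalloc fallback; overflow-checked array variant */
	index = kvmalloc_array(nr_entries, sizeof(*index), GFP_KERNEL);
	if (!index)
		return -ENOMEM;

	/* ... fill and use the index ... */

	kvfree(index);  /* correct for both the kmalloc and the vmalloc case */
	return 0;
}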
/linux/fs/jffs2/
compr_lzo.c
  32   lzo_mem = vmalloc(LZO1X_MEM_COMPRESS);  in alloc_workspace()
  33   lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));  in alloc_workspace()
compr_zlib.c
  45   def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,  in alloc_workspaces()
  52   inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());  in alloc_workspaces()
/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_fb.c
  42   void *vmalloc;  member
  238  src_ptr = (u8 *)par->vmalloc +  in vmw_fb_dirty_flush()
  662  par->vmalloc = NULL;  in vmw_fb_init()
  678  par->vmalloc = vzalloc(fb_size);  in vmw_fb_init()
  679  if (unlikely(par->vmalloc == NULL)) {  in vmw_fb_init()
  701  info->screen_base = (char __iomem *)par->vmalloc;  in vmw_fb_init()
  757  vfree(par->vmalloc);  in vmw_fb_init()
  785  vfree(par->vmalloc);  in vmw_fb_close()
/linux/fs/squashfs/
lzo_wrapper.c
  35   stream->input = vmalloc(block_size);  in lzo_init()
  38   stream->output = vmalloc(block_size);  in lzo_init()
lz4_wrapper.c
  60   stream->input = vmalloc(block_size);  in lz4_init()
  63   stream->output = vmalloc(block_size);  in lz4_init()
/linux/sound/drivers/opl4/
opl4_proc.c
  46   buf = vmalloc(count);  in snd_opl4_mem_proc_read()
  67   buf = vmalloc(count);  in snd_opl4_mem_proc_write()
/linux/drivers/scsi/fnic/
fnic_debugfs.c
  61   fc_trc_flag = vmalloc(sizeof(struct fc_trace_flag_type));  in fnic_debugfs_init()
  219  fnic_dbg_prt->buffer = vmalloc(array3_size(3, trace_max_pages,  in fnic_trace_debugfs_open()
  230  vmalloc(array3_size(3, fnic_fc_trace_max_pages,  in fnic_trace_debugfs_open()
  605  debug->debug_buffer = vmalloc(buf_size);  in fnic_stats_debugfs_open()
/linux/Documentation/arm/
porting.rst
  97   Virtual addresses bounding the vmalloc() area. There must not be
  98   any static mappings in this area; vmalloc will overwrite them.
  100  Normally, the vmalloc() area starts VMALLOC_OFFSET bytes above the
  105  between virtual RAM and the vmalloc area. We do this to allow
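The convention described here (the vmalloc() area starting VMALLOC_OFFSET bytes above the top of virtual RAM, leaving a hole between the two) is commonly expressed as a pair of macros. This is an illustrative sketch only; the exact offset and placement are per-platform decisions.

/*
 * Illustrative: an 8 MiB hole above high_memory, with VMALLOC_START
 * rounded so that it stays VMALLOC_OFFSET-aligned.
 */
#define VMALLOC_OFFSET	(8 * 1024 * 1024)
#define VMALLOC_START	(((unsigned long)high_memory + VMALLOC_OFFSET) & \
			 ~(VMALLOC_OFFSET - 1))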
memory.rst
  57   mapping within the vmalloc space.
  59   VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space.
  60   Memory returned by vmalloc/ioremap will
/linux/drivers/video/
vgastate.c
  362  saved->vga_cmap = vmalloc(768);  in save_vga()
  384  saved->attr = vmalloc(total);  in save_vga()
  421  saved->vga_font0 = vmalloc(4 * 8192);  in save_vga()
  433  saved->vga_font1 = vmalloc(state->memsize);  in save_vga()
  444  saved->vga_text = vmalloc(8192 * 2);  in save_vga()
/linux/drivers/mtd/tests/
stresstest.c
  188  readbuf = vmalloc(bufsize);  in mtd_stresstest_init()
  189  writebuf = vmalloc(bufsize);  in mtd_stresstest_init()
/linux/arch/um/include/shared/
um_malloc.h
  14   extern void *vmalloc(unsigned long size);