
Searched refs:locked_vm (Results 1 – 17 of 17) sorted by relevance

/linux/mm/
util.c
450 unsigned long locked_vm, limit; in __account_locked_vm() local
455 locked_vm = mm->locked_vm; in __account_locked_vm()
459 if (locked_vm + pages > limit) in __account_locked_vm()
463 mm->locked_vm = locked_vm + pages; in __account_locked_vm()
465 WARN_ON_ONCE(pages > locked_vm); in __account_locked_vm()
466 mm->locked_vm = locked_vm - pages; in __account_locked_vm()
471 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), in __account_locked_vm()
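The util.c hits above all fall inside __account_locked_vm(), the common helper that charges pages against mm->locked_vm with the mmap lock held for writing and rejects the request when RLIMIT_MEMLOCK would be exceeded (unless the caller may bypass the limit, e.g. with CAP_IPC_LOCK). Below is a rough userspace model of that check, for illustration only; account_locked_pages() and its arguments are invented for this sketch and are not kernel API.

/*
 * Minimal userspace model of the __account_locked_vm() check:
 * refuse to raise the locked-page count past RLIMIT_MEMLOCK
 * unless the caller is allowed to bypass the limit.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static unsigned long locked_vm;	/* stand-in for mm->locked_vm (pages) */

static int account_locked_pages(unsigned long pages, bool inc, bool bypass_rlimit)
{
	unsigned long limit;
	struct rlimit r;

	if (getrlimit(RLIMIT_MEMLOCK, &r))
		return -1;
	limit = r.rlim_cur / (unsigned long)sysconf(_SC_PAGESIZE);

	if (inc) {
		if (locked_vm + pages > limit && !bypass_rlimit)
			return -1;	/* kernel returns -ENOMEM here */
		locked_vm += pages;
	} else {
		locked_vm -= pages;	/* kernel WARNs if pages > locked_vm */
	}
	return 0;
}

int main(void)
{
	if (account_locked_pages(16, true, false))
		puts("would exceed RLIMIT_MEMLOCK");
	else
		printf("locked_vm is now %lu pages\n", locked_vm);
	return 0;
}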
mmap.c
1360 locked += mm->locked_vm; in mlock_future_check()
1855 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
2349 locked = mm->locked_vm + grow; in acct_stack_growth()
2442 mm->locked_vm += grow; in expand_upwards()
2522 mm->locked_vm += grow; in expand_downwards()
2782 mm->locked_vm -= vma_pages(tmp); in unlock_range()
2873 if (mm->locked_vm) in __do_munmap()
3084 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk_flags()
3156 if (mm->locked_vm) in exit_mmap()
mremap.c
706 mm->locked_vm += new_len >> PAGE_SHIFT; in move_vma()
768 locked = mm->locked_vm << PAGE_SHIFT; in vma_to_resize()
1031 mm->locked_vm += pages; in SYSCALL_DEFINE5()
mlock.c
542 mm->locked_vm += nr_pages; in mlock_fixup()
669 locked += current->mm->locked_vm; in do_mlock()
debug.c
247 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, in dump_mm()
/linux/net/xdp/
xdp_umem.c
37 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
141 old_npgs = atomic_long_read(&umem->user->locked_vm); in xdp_umem_account_pages()
148 } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs, in xdp_umem_account_pages()
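The xdp_umem.c hits follow a different pattern from the mm/ sites: the charge goes to the per-user counter user->locked_vm (an atomic_long_t, see include/linux/sched/user.h below) using a read/check/cmpxchg retry loop instead of updating under the mmap lock. Here is a simplified userspace sketch of that loop using C11 atomics; the function names and limit handling are illustrative, not the kernel code.

/*
 * Simplified model of the cmpxchg loop used by xdp_umem_account_pages(),
 * mm_account_pinned_pages() and io_uring's __io_account_mem(): bump a
 * shared per-user page counter only if the result stays under the limit.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long user_locked_vm;	/* stand-in for user->locked_vm */

static int account_pages(unsigned long npgs, unsigned long limit)
{
	unsigned long old, new;

	do {
		old = atomic_load(&user_locked_vm);
		new = old + npgs;
		if (new > limit)
			return -1;	/* kernel returns -ENOBUFS / -ENOMEM */
	} while (!atomic_compare_exchange_weak(&user_locked_vm, &old, new));

	return 0;
}

static void unaccount_pages(unsigned long npgs)
{
	atomic_fetch_sub(&user_locked_vm, npgs);	/* mirrors atomic_long_sub() */
}

int main(void)
{
	if (account_pages(64, 1024) == 0)
		printf("accounted, locked_vm = %lu pages\n",
		       atomic_load(&user_locked_vm));
	unaccount_pages(64);
	return 0;
}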
/linux/include/linux/sched/
user.h
28 atomic_long_t locked_vm; member
/linux/include/linux/
mm_types.h
551 unsigned long locked_vm; /* Pages that have PG_mlocked set */ member
/linux/Documentation/vm/
unevictable-lru.rst
356 VMAs against the task's "locked_vm".
485 to be mlocked to the task's "locked_vm". To account for filtered VMAs,
487 callers then subtract a non-negative return value from the task's locked_vm. A
490 memory range accounted as locked_vm, as the protections could be changed later
/linux/drivers/infiniband/sw/siw/
siw_verbs.c
1308 if (num_pages > mem_limit - current->mm->locked_vm) { in siw_reg_user_mr()
1311 current->mm->locked_vm); in siw_reg_user_mr()
/linux/fs/proc/
task_mmu.c
61 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); in task_mem()
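task_mem() is where mm->locked_vm becomes visible to userspace, as the VmLck line of /proc/<pid>/status (reported in kB). A small demonstration program follows, assuming mlock() succeeds under the current RLIMIT_MEMLOCK; if it fails, VmLck simply stays at 0. Error handling is mostly omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	char *buf = malloc(1 << 20);
	char line[256];
	FILE *f;

	mlock(buf, 1 << 20);		/* raises current->mm->locked_vm */

	f = fopen("/proc/self/status", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "VmLck:", 6))
			fputs(line, stdout);	/* e.g. "VmLck:      1024 kB" */
	fclose(f);

	munlock(buf, 1 << 20);
	free(buf);
	return 0;
}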
/linux/net/core/
skbuff.c
1142 old_pg = atomic_long_read(&user->locked_vm); in mm_account_pinned_pages()
1146 } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != in mm_account_pinned_pages()
1163 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); in mm_unaccount_pinned_pages()
/linux/Documentation/driver-api/
vfio.rst
465 mm::locked_vm counter to make sure we do not exceed the rlimit.
/linux/kernel/events/
core.c
6080 atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm); in perf_mmap_close()
6155 &mmap_user->locked_vm); in perf_mmap_close()
6301 user_locked = atomic_long_read(&user->locked_vm); in perf_mmap()
6364 atomic_long_add(user_extra, &user->locked_vm); in perf_mmap()
/linux/kernel/
fork.c
1048 mm->locked_vm = 0; in mm_init()
/linux/drivers/vfio/
vfio_iommu_type1.c
708 mm->locked_vm + lock_acct + 1 > limit) { in vfio_pin_pages_remote()
/linux/fs/
io_uring.c
8789 atomic_long_sub(nr_pages, &user->locked_vm); in __io_unaccount_mem()
8801 cur_pages = atomic_long_read(&user->locked_vm); in __io_account_mem()
8805 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages, in __io_account_mem()
