/linux/include/linux/

rwlock_api_smp.h
    185  rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in __raw_write_lock_irqsave()
    194  rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in __raw_write_lock_irq()
    201  rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in __raw_write_lock_bh()
    216  rwlock_release(&lock->dep_map, _RET_IP_);  in __raw_write_unlock()
    223  rwlock_release(&lock->dep_map, _RET_IP_);  in __raw_read_unlock()
    231  rwlock_release(&lock->dep_map, _RET_IP_);  in __raw_read_unlock_irqrestore()
    239  rwlock_release(&lock->dep_map, _RET_IP_);  in __raw_read_unlock_irq()
    247  rwlock_release(&lock->dep_map, _RET_IP_);  in __raw_read_unlock_bh()
    255  rwlock_release(&lock->dep_map, _RET_IP_);  in __raw_write_unlock_irqrestore()
    263  rwlock_release(&lock->dep_map, _RET_IP_);  in __raw_write_unlock_irq()
    [all …]

spinlock_api_smp.h
    90   spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);  in __raw_spin_trylock()
    110  spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in __raw_spin_lock_irqsave()
    119  spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in __raw_spin_lock_irq()
    126  spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in __raw_spin_lock_bh()
    133  spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in __raw_spin_lock()
    141  spin_release(&lock->dep_map, _RET_IP_);  in __raw_spin_unlock()
    149  spin_release(&lock->dep_map, _RET_IP_);  in __raw_spin_unlock_irqrestore()
    157  spin_release(&lock->dep_map, _RET_IP_);  in __raw_spin_unlock_irq()
    165  spin_release(&lock->dep_map, _RET_IP_);  in __raw_spin_unlock_bh()
    167  __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);  in __raw_spin_unlock_bh()
    [all …]

percpu-rwsem.h
    51   rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);  in percpu_down_read()
    92   rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);  in percpu_down_read_trylock()
    99   rwsem_release(&sem->dep_map, _RET_IP_);  in percpu_up_read()

kasan.h
    235  return __kasan_slab_free(s, object, _RET_IP_, init);  in kasan_slab_free()
    243  __kasan_kfree_large(ptr, _RET_IP_);  in kasan_kfree_large()
    250  __kasan_slab_free_mempool(ptr, _RET_IP_);  in kasan_slab_free_mempool()
    301  return __kasan_check_byte(addr, _RET_IP_);  in kasan_check_byte()
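
All of the header hits above follow the same lockdep idiom: the locking wrapper annotates lock->dep_map (spin_acquire()/spin_release() and their rwlock/rwsem counterparts) and passes _RET_IP_, which expands to (unsigned long)__builtin_return_address(0), so the diagnostics point at the wrapper's caller rather than at the wrapper itself. Below is a minimal userspace sketch of that idiom; my_lock_acquire(), trace_acquire() and friends are hypothetical stand-ins, not kernel APIs.

    /* Sketch only: a wrapper captures its own return address and hands it
     * to a diagnostic hook, mirroring how the kernel passes _RET_IP_. */
    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-ins for lockdep's hooks: just log the caller's address. */
    static void trace_acquire(const char *name, unsigned long caller_ip)
    {
        printf("%s acquired from %#lx\n", name, caller_ip);
    }

    static void trace_release(const char *name, unsigned long caller_ip)
    {
        printf("%s released from %#lx\n", name, caller_ip);
    }

    static void my_lock_acquire(void)
    {
        /* _RET_IP_ == (unsigned long)__builtin_return_address(0) */
        trace_acquire("my_lock", (unsigned long)__builtin_return_address(0));
        pthread_mutex_lock(&my_lock);
    }

    static void my_lock_release(void)
    {
        trace_release("my_lock", (unsigned long)__builtin_return_address(0));
        pthread_mutex_unlock(&my_lock);
    }

    int main(void)
    {
        my_lock_acquire();   /* the hook reports this call site, not the wrapper */
        my_lock_release();
        return 0;
    }

__builtin_return_address(0) is a gcc/clang builtin, so the sketch needs no extra library; build with -pthread for the mutex.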
/linux/drivers/tty/

tty_ldsem.c
    304  lock_contended(&sem->dep_map, _RET_IP_);  in __ldsem_down_read_nested()
    306  rwsem_release(&sem->dep_map, _RET_IP_);  in __ldsem_down_read_nested()
    310  lock_acquired(&sem->dep_map, _RET_IP_);  in __ldsem_down_read_nested()
    323  lock_contended(&sem->dep_map, _RET_IP_);  in __ldsem_down_write_nested()
    325  rwsem_release(&sem->dep_map, _RET_IP_);  in __ldsem_down_write_nested()
    329  lock_acquired(&sem->dep_map, _RET_IP_);  in __ldsem_down_write_nested()
    353  lock_acquired(&sem->dep_map, _RET_IP_);  in ldsem_down_read_trylock()
    378  rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);  in ldsem_down_write_trylock()
    379  lock_acquired(&sem->dep_map, _RET_IP_);  in ldsem_down_write_trylock()
    393  rwsem_release(&sem->dep_map, _RET_IP_);  in ldsem_up_read()
    [all …]
/linux/kernel/locking/

rtmutex_api.c
    29   mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);  in __rt_mutex_lock_common()
    32   mutex_release(&lock->dep_map, _RET_IP_);  in __rt_mutex_lock_common()
    105  mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);  in rt_mutex_trylock()
    118  mutex_release(&lock->dep_map, _RET_IP_);  in rt_mutex_unlock()
    508  __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);  in _mutex_lock_nest_lock()
    542  __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);  in mutex_lock()
    548  return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);  in mutex_lock_interruptible()
    554  return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);  in mutex_lock_killable()
    562  __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);  in mutex_lock_io()
    577  mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);  in mutex_trylock()
    [all …]

spinlock_rt.c
    54   spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);  in rt_spin_lock()
    62   spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);  in rt_spin_lock_nested()
    70   spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);  in rt_spin_lock_nest_lock()
    78   spin_release(&lock->dep_map, _RET_IP_);  in rt_spin_unlock()
    107  spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);  in __rt_spin_trylock()
    200  rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);  in rt_read_trylock()
    214  rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);  in rt_write_trylock()
    225  rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);  in rt_read_lock()
    235  rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);  in rt_write_lock()
    244  rwlock_release(&rwlock->dep_map, _RET_IP_);  in rt_read_unlock()
    [all …]

mutex.c
    547   __mutex_unlock_slowpath(lock, _RET_IP_);  in mutex_unlock()
    800   __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);  in _mutex_lock_nest_lock()
    827   subclass, NULL, _RET_IP_, NULL, 0);  in mutex_lock_io_nested()
    865   0, _RET_IP_, ctx);  in ww_mutex_lock()
    880   0, _RET_IP_, ctx);  in ww_mutex_lock_interruptible()
    1028  __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);  in __mutex_lock_slowpath()
    1034  return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);  in __mutex_lock_killable_slowpath()
    1040  return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);  in __mutex_lock_interruptible_slowpath()
    1047  _RET_IP_, ctx);  in __ww_mutex_lock_slowpath()
    1055  _RET_IP_, ctx);  in __ww_mutex_lock_interruptible_slowpath()
    [all …]

rwsem.c
    1482  rwsem_release(&sem->dep_map, _RET_IP_);  in down_read_interruptible()
    1496  rwsem_release(&sem->dep_map, _RET_IP_);  in down_read_killable()
    1523  rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);  in down_write()
    1534  rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);  in down_write_killable()
    1538  rwsem_release(&sem->dep_map, _RET_IP_);  in down_write_killable()
    1554  rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);  in down_write_trylock()
    1565  rwsem_release(&sem->dep_map, _RET_IP_);  in up_read()
    1575  rwsem_release(&sem->dep_map, _RET_IP_);  in up_write()
    1585  lock_downgrade(&sem->dep_map, _RET_IP_);  in downgrade_write()
    1606  rwsem_release(&sem->dep_map, _RET_IP_);  in down_read_killable_nested()
    [all …]

ww_rt_mutex.c
    29  mutex_acquire_nest(&rtm->dep_map, 0, 1, ww_ctx->dep_map, _RET_IP_);  in ww_mutex_trylock()
    81  return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);  in ww_mutex_lock()
    88  return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);  in ww_mutex_lock_interruptible()
    98  mutex_release(&rtm->dep_map, _RET_IP_);  in ww_mutex_unlock()

spinlock.c
    368  spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);  in _raw_spin_lock_nested()
    380  spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);  in _raw_spin_lock_irqsave_nested()
    390  spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);  in _raw_spin_lock_nest_lock()

percpu-rwsem.c
    217  rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);  in percpu_down_write()
    244  rwsem_release(&sem->dep_map, _RET_IP_);  in percpu_up_write()
/linux/kernel/kcsan/

core.c
    767   check_access(ptr, size, type, _RET_IP_);  in kcsan_begin_scoped_access()
    775   sa->ip = _RET_IP_;  in kcsan_begin_scoped_access()
    813   check_access(ptr, size, type, _RET_IP_);  in __kcsan_check_access()
    854   _RET_IP_); \
    870   check_access(ptr, size, 0, _RET_IP_);  in __tsan_read_range()
    877   check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);  in __tsan_write_range()
    899   _RET_IP_); \
    915   _RET_IP_); \
    992   KCSAN_ACCESS_ATOMIC, _RET_IP_); \
    1024  KCSAN_ACCESS_ATOMIC, _RET_IP_); \
    [all …]
/linux/mm/kasan/

shadow.c
    31  return kasan_check_range((unsigned long)p, size, false, _RET_IP_);  in __kasan_check_read()
    37  return kasan_check_range((unsigned long)p, size, true, _RET_IP_);  in __kasan_check_write()
    44  if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))  in memset()
    54  if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||  in memmove()
    55  !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))  in memmove()
    65  if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||  in memcpy()
    66  !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))  in memcpy()

sw_tags.c
    137  kasan_check_range(addr, size, false, _RET_IP_); \
    142  kasan_check_range(addr, size, true, _RET_IP_); \
    154  kasan_check_range(addr, size, false, _RET_IP_);  in __hwasan_loadN_noabort()
    160  kasan_check_range(addr, size, true, _RET_IP_);  in __hwasan_storeN_noabort()

report_generic.c
    295  kasan_report(addr, size, false, _RET_IP_); \
    302  kasan_report(addr, size, true, _RET_IP_); \
    319  kasan_report(addr, size, false, _RET_IP_);  in __asan_report_load_n_noabort()
    325  kasan_report(addr, size, true, _RET_IP_);  in __asan_report_store_n_noabort()

generic.c
    238  check_region_inline(addr, size, false, _RET_IP_); \
    246  check_region_inline(addr, size, true, _RET_IP_); \
    261  kasan_check_range(addr, size, false, _RET_IP_);  in __asan_loadN()
    271  kasan_check_range(addr, size, true, _RET_IP_);  in __asan_storeN()
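
The shadow.c hits show another common shape: KASAN's memcpy()/memmove()/memset() interceptors validate the source range as a read and the destination range as a write before doing the real operation, forwarding _RET_IP_ so a failure report names the caller of memcpy() rather than the interceptor. A rough userspace sketch of that shape, with a hypothetical check_range() in place of kasan_check_range():

    /* Sketch only: an instrumented memcpy that checks both ranges and
     * carries the caller's return address into the error report. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool check_range(const void *addr, size_t len, bool write,
                            unsigned long caller_ip)
    {
        /* A real checker would consult shadow memory; this one only
         * rejects NULL ranges so the sketch has something to report. */
        if (addr == NULL && len != 0) {
            fprintf(stderr, "bad %zu-byte %s, called from %#lx\n",
                    len, write ? "write" : "read", caller_ip);
            return false;
        }
        return true;
    }

    static void *checked_memcpy(void *dest, const void *src, size_t len)
    {
        unsigned long ip = (unsigned long)__builtin_return_address(0);

        if (!check_range(src, len, false, ip) ||    /* source: read  */
            !check_range(dest, len, true, ip))      /* dest:   write */
            return dest;                            /* skip the copy */

        return memcpy(dest, src, len);
    }

    int main(void)
    {
        char buf[8];

        checked_memcpy(buf, "hi", 3);    /* passes */
        checked_memcpy(buf, NULL, 3);    /* reported against this call site */
        return 0;
    }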
/linux/fs/xfs/

xfs_buf.c
    266   trace_xfs_buf_init(bp, _RET_IP_);  in _xfs_buf_alloc()
    300   trace_xfs_buf_free(bp, _RET_IP_);  in xfs_buf_free()
    943   trace_xfs_buf_hold(bp, _RET_IP_);  in xfs_buf_hold()
    959   trace_xfs_buf_rele(bp, _RET_IP_);  in xfs_buf_rele()
    1057  trace_xfs_buf_trylock(bp, _RET_IP_);  in xfs_buf_trylock()
    1076  trace_xfs_buf_lock(bp, _RET_IP_);  in xfs_buf_lock()
    1082  trace_xfs_buf_lock_done(bp, _RET_IP_);  in xfs_buf_lock()
    1092  trace_xfs_buf_unlock(bp, _RET_IP_);  in xfs_buf_unlock()
    1252  trace_xfs_buf_iodone(bp, _RET_IP_);  in xfs_buf_ioend()
    1572  trace_xfs_buf_iowait(bp, _RET_IP_);  in xfs_buf_iowait()
    [all …]

xfs_trans.c
    74   trace_xfs_trans_free(tp, _RET_IP_);  in xfs_trans_free()
    96   trace_xfs_trans_dup(tp, _RET_IP_);  in xfs_trans_dup()
    307  trace_xfs_trans_alloc(tp, _RET_IP_);  in xfs_trans_alloc()
    657  trace_xfs_trans_add_item(tp, _RET_IP_);  in xfs_trans_add_item()
    681  trace_xfs_trans_free_items(tp, _RET_IP_);  in xfs_trans_free_items()
    843  trace_xfs_trans_commit(tp, _RET_IP_);  in __xfs_trans_commit()
    943  trace_xfs_trans_cancel(tp, _RET_IP_);  in xfs_trans_cancel()
    992  trace_xfs_trans_roll(trans, _RET_IP_);  in xfs_trans_roll()
/linux/kernel/

kcov.c
    196  unsigned long ip = canonicalize_ip(_RET_IP_);  in __sanitizer_cov_trace_pc()
    249  write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);  in __sanitizer_cov_trace_cmp1()
    255  write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);  in __sanitizer_cov_trace_cmp2()
    261  write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);  in __sanitizer_cov_trace_cmp4()
    267  write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);  in __sanitizer_cov_trace_cmp8()
    274  _RET_IP_);  in __sanitizer_cov_trace_const_cmp1()
    281  _RET_IP_);  in __sanitizer_cov_trace_const_cmp2()
    288  _RET_IP_);  in __sanitizer_cov_trace_const_cmp4()
    295  _RET_IP_);  in __sanitizer_cov_trace_const_cmp8()
    323  write_comp_data(type, cases[i + 2], val, _RET_IP_);  in __sanitizer_cov_trace_switch()

stop_machine.c
    142  struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };  in stop_one_cpu()
    351  .caller = _RET_IP_,  in stop_two_cpus()
    387  *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };  in stop_one_cpu_nowait()
    412  work->caller = _RET_IP_;  in queue_stop_cpus_work()
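
stop_machine.c records the caller differently: _RET_IP_ is stored in the cpu_stop_work descriptor (.caller = _RET_IP_) when the work is queued, so the information is still there when the work runs, or stalls, later on another CPU (kcov.c, above it, treats the same return address as the coverage location of the instrumented code). A small sketch of that store-now-report-later pattern; the work_item struct and helpers are hypothetical:

    /* Sketch only: remember who queued a work item so the runner can
     * report it later, as stop_machine does with .caller = _RET_IP_. */
    #include <stdio.h>

    struct work_item {
        void (*fn)(void *arg);
        void *arg;
        unsigned long caller;       /* return address of the enqueuer */
    };

    static void queue_work_item(struct work_item *w, void (*fn)(void *), void *arg)
    {
        w->fn = fn;
        w->arg = arg;
        w->caller = (unsigned long)__builtin_return_address(0);
    }

    static void run_work_item(struct work_item *w)
    {
        printf("running work queued from %#lx\n", w->caller);
        w->fn(w->arg);
    }

    static void say_hello(void *arg)
    {
        printf("hello, %s\n", (const char *)arg);
    }

    int main(void)
    {
        struct work_item w;

        queue_work_item(&w, say_hello, "world");   /* this call site is recorded */
        run_work_item(&w);
        return 0;
    }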
/linux/fs/xfs/libxfs/

xfs_refcount.c
    182   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_update()
    214   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_insert()
    252   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_delete()
    397   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_split_extent()
    474   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_merge_center_extents()
    536   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_merge_left_extent()
    600   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_merge_right_extent()
    691   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_find_left_extents()
    780   cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_find_right_extents()
    1103  error, _RET_IP_);  in xfs_refcount_adjust()
    [all …]

xfs_defer.c
    228  trace_xfs_defer_trans_abort(tp, _RET_IP_);  in xfs_defer_trans_abort()
    328  trace_xfs_defer_trans_roll(*tpp, _RET_IP_);  in xfs_defer_trans_roll()
    496  trace_xfs_defer_finish(*tp, _RET_IP_);  in xfs_defer_finish_noroll()
    528  trace_xfs_defer_finish_done(*tp, _RET_IP_);  in xfs_defer_finish_noroll()
    574  trace_xfs_defer_cancel(tp, _RET_IP_);  in xfs_defer_cancel()
/linux/mm/

slob.c
    526  return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);  in __kmalloc()
    549  trace_kfree(_RET_IP_, block);  in kfree()
    611  trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,  in slob_alloc_node()
    616  trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,  in slob_alloc_node()
    639  return __do_kmalloc_node(size, gfp, node, _RET_IP_);  in __kmalloc_node()
    669  trace_kmem_cache_free(_RET_IP_, b, c->name);  in kmem_cache_free()

memblock.c
    672   &base, &end, nid, flags, (void *)_RET_IP_);  in memblock_add_node()
    693   &base, &end, (void *)_RET_IP_);  in memblock_add()
    794   &base, &end, (void *)_RET_IP_);  in memblock_remove()
    826   &base, &end, (void *)_RET_IP_);  in memblock_phys_free()
    837   &base, &end, (void *)_RET_IP_);  in memblock_reserve()
    848   &base, &end, (void *)_RET_IP_);  in memblock_physmem_add()
    1425  (void *)_RET_IP_);  in memblock_phys_alloc_range()
    1526  &max_addr, (void *)_RET_IP_);  in memblock_alloc_exact_nid_raw()
    1558  &max_addr, (void *)_RET_IP_);  in memblock_alloc_try_nid_raw()
    1590  &max_addr, (void *)_RET_IP_);  in memblock_alloc_try_nid()
    [all …]
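
memblock.c casts _RET_IP_ to void * before handing it to its debug printout, where the kernel's %pS printk format turns the address into symbol+offset, so memblock=debug output shows which function added or reserved each range; slob.c feeds the same value into the kmem trace events as the allocation call site. A loose userspace analogue, using dladdr() in place of %pS (my_reserve() and debug_reserve() are made-up names):

    /* Sketch only: symbolize the caller's return address for a debug
     * line, roughly what printing (void *)_RET_IP_ with %pS gives the
     * kernel. Build with -rdynamic so dladdr() can see the symbols. */
    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>
    #include <stddef.h>

    static void debug_reserve(size_t size, const void *caller)
    {
        Dl_info info;

        if (dladdr(caller, &info) && info.dli_sname)
            printf("reserve %zu bytes from %s+0x%lx\n", size, info.dli_sname,
                   (unsigned long)caller - (unsigned long)info.dli_saddr);
        else
            printf("reserve %zu bytes from %p\n", size, caller);
    }

    /* A memblock_reserve()-like wrapper: log the call site, then do the work. */
    void my_reserve(size_t size)
    {
        debug_reserve(size, __builtin_return_address(0));
        /* ...actual bookkeeping would go here... */
    }

    int main(void)
    {
        my_reserve(64);
        return 0;
    }

On older glibc, link with -ldl as well; without -rdynamic the sketch falls back to printing the raw pointer.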