/linux/arch/s390/lib/

  spinlock.c
     75  int owner;  in arch_load_niai4() local
     81  return owner;  in arch_load_niai4()
    162  if (owner && arch_vcpu_is_preempted(owner - 1))  in arch_spin_lock_queued()
    163  smp_yield_cpu(owner - 1);  in arch_spin_lock_queued()
    174  if (owner && arch_vcpu_is_preempted(owner - 1))  in arch_spin_lock_queued()
    184  if (!owner) {  in arch_spin_lock_queued()
    219  if (owner && arch_vcpu_is_preempted(owner - 1))  in arch_spin_lock_classic()
    220  smp_yield_cpu(owner - 1);  in arch_spin_lock_classic()
    227  if (!owner) {  in arch_spin_lock_classic()
    255  int owner, count;  in arch_spin_trylock_retry() local
    [all …]
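
Note: in the s390 slow paths above, the lock word is read as an owner value and the locker yields to the owning CPU when that virtual CPU has been preempted; the "owner - 1" arithmetic suggests the lock word stores the owning CPU number plus one, so zero means "unlocked". A minimal user-space model of that encoding (not the s390 fast path; vcpu_is_preempted() is a stub standing in for arch_vcpu_is_preempted()):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* 0 = unlocked, n + 1 = held by CPU n (assumed encoding, mirroring "owner - 1"). */
    static atomic_int lock_word;

    static bool vcpu_is_preempted(int cpu)   /* stand-in for arch_vcpu_is_preempted() */
    {
        (void)cpu;
        return false;
    }

    static void spin_lock_on_cpu(int my_cpu)
    {
        int expected = 0;

        while (!atomic_compare_exchange_weak(&lock_word, &expected, my_cpu + 1)) {
            int owner = expected;       /* a failed cmpxchg reports the current value */

            if (owner && vcpu_is_preempted(owner - 1))
                ;                       /* the kernel would call smp_yield_cpu(owner - 1) here */
            expected = 0;               /* retry against an unlocked word */
        }
    }

    static void spin_unlock(void)
    {
        atomic_store(&lock_word, 0);
    }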

/linux/kernel/locking/

  rtmutex.c
    109  unsigned long owner, *p = (unsigned long *) &lock->owner;  in fixup_rt_mutex_waiters() local
    203  unsigned long owner, *p = (unsigned long *) &lock->owner;  in mark_rt_mutex_waiters() local
    206  owner = *p;  in mark_rt_mutex_waiters()
    208  owner | RT_MUTEX_HAS_WAITERS) != owner);  in mark_rt_mutex_waiters()
    281  lock->owner = NULL;  in unlock_rt_mutex_safe()
   1107  if (owner == task)  in task_blocks_on_rt_mutex()
   1139  if (!owner)  in task_blocks_on_rt_mutex()
   1512  owner = NULL;  in rt_mutex_slowlock_block()
   1515  if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))  in rt_mutex_slowlock_block()
   1694  owner = NULL;  in rtlock_slowlock_locked()
    [all …]

  mutex.c
     94  return owner & MUTEX_FLAGS;  in __owner_flags()
    104  owner = atomic_long_read(&lock->owner);  in __mutex_trylock_common()
    126  if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {  in __mutex_trylock_common()
    230  unsigned long owner = atomic_long_read(&lock->owner);  in __mutex_handoff() local
    243  if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))  in __mutex_handoff()
    412  if (owner)  in mutex_can_spin_on_owner()
    413  retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));  in mutex_can_spin_on_owner()
    473  if (!owner)  in mutex_optimistic_spin()
    898  unsigned long owner;  in __mutex_unlock_slowpath() local
    909  owner = atomic_long_read(&lock->owner);  in __mutex_unlock_slowpath()
    [all …]
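
Note: the "owner & MUTEX_FLAGS" and "task | flags" expressions above show that struct mutex keeps the owning task pointer and a few status bits in a single atomic long, relying on task_struct alignment to leave the low bits free. A minimal sketch of that packing (flag names and values follow kernel/locking/mutex.c; "struct task" stands in for task_struct):

    #include <stdint.h>

    #define MUTEX_FLAG_WAITERS  0x01UL   /* waiters queued, unlock must issue a wakeup */
    #define MUTEX_FLAG_HANDOFF  0x02UL   /* unlock should hand the lock to the top waiter */
    #define MUTEX_FLAG_PICKUP   0x04UL   /* handed-off lock waiting to be picked up */
    #define MUTEX_FLAGS         0x07UL

    struct task;                         /* stand-in for struct task_struct */

    static unsigned long owner_flags(unsigned long owner)
    {
        return owner & MUTEX_FLAGS;                 /* mirrors __owner_flags() */
    }

    static struct task *owner_task(unsigned long owner)
    {
        return (struct task *)(owner & ~MUTEX_FLAGS);
    }

    static unsigned long pack_owner(struct task *task, unsigned long flags)
    {
        return (unsigned long)task | flags;         /* as in __mutex_trylock_common() */
    }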

  spinlock_debug.c
     28  lock->owner = SPINLOCK_OWNER_INIT;  in __raw_spin_lock_init()
     47  lock->owner = SPINLOCK_OWNER_INIT;  in __rwlock_init()
     56  struct task_struct *owner = READ_ONCE(lock->owner);  in spin_dump() local
     58  if (owner == SPINLOCK_OWNER_INIT)  in spin_dump()
     59  owner = NULL;  in spin_dump()
     66  owner ? owner->comm : "<none>",  in spin_dump()
     67  owner ? task_pid_nr(owner) : -1,  in spin_dump()
     94  WRITE_ONCE(lock->owner, current);  in debug_spin_lock_after()
    104  WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);  in debug_spin_unlock()
    194  WRITE_ONCE(lock->owner, current);  in debug_write_lock_after()
    [all …]

  rwsem.c
    143  atomic_long_set(&sem->owner, 0);  in rwsem_clear_owner()
    224  unsigned long owner = atomic_long_read(&sem->owner);  in rwsem_set_nonspinnable() local
    229  if (owner & RWSEM_NONSPINNABLE)  in rwsem_set_nonspinnable()
    231  } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,  in rwsem_set_nonspinnable()
    278  unsigned long owner = atomic_long_read(&sem->owner);  in rwsem_owner_flags() local
    320  atomic_long_set(&sem->owner, 0L);  in __init_rwsem()
    448  struct task_struct *owner;  in rwsem_mark_wake() local
    473  owner = waiter->task;  in rwsem_mark_wake()
    667  return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));  in owner_on_cpu()
    672  struct task_struct *owner;  in rwsem_can_spin_on_owner() local
    [all …]
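
Note: rwsem_set_nonspinnable() above is a typical compare-and-swap retry loop: re-read the owner word until the nonspinnable bit is either already present or successfully OR-ed in. A user-space sketch of that loop with C11 atomics (flag names follow kernel/locking/rwsem.c; treat the exact bit values as an assumption):

    #include <stdatomic.h>

    #define RWSEM_READER_OWNED   0x1UL   /* owner word marks a reader-owned rwsem */
    #define RWSEM_NONSPINNABLE   0x2UL   /* writers should stop optimistic spinning */

    static void set_nonspinnable(atomic_ulong *owner_word)
    {
        unsigned long owner = atomic_load(owner_word);

        do {
            if (!(owner & RWSEM_READER_OWNED))   /* no longer reader-owned: nothing to mark */
                break;
            if (owner & RWSEM_NONSPINNABLE)      /* someone else already set the bit */
                break;
        } while (!atomic_compare_exchange_weak(owner_word, &owner,
                                               owner | RWSEM_NONSPINNABLE));
    }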

/linux/drivers/xen/

  pci.c
    274  if (owner->dev == dev)  in find_device()
    275  return owner;  in find_device()
    287  if (owner)  in xen_find_device_domain_owner()
    288  domain = owner->domain;  in xen_find_device_domain_owner()
    299  if (!owner)  in xen_register_device_domain_owner()
    305  kfree(owner);  in xen_register_device_domain_owner()
    308  owner->domain = domain;  in xen_register_device_domain_owner()
    309  owner->dev = dev;  in xen_register_device_domain_owner()
    322  if (!owner) {  in xen_unregister_device_domain_owner()
    326  list_del(&owner->list);  in xen_unregister_device_domain_owner()
    [all …]

/linux/drivers/media/mc/

  mc-dev-allocator.c
     31  struct module *owner;  member
     63  struct module *owner)  in __media_device_get() argument
     74  if (owner != mdi->owner && !try_module_get(mdi->owner))  in __media_device_get()
     88  mdi->owner = owner;  in __media_device_get()
     99  struct module *owner)  in media_device_usb_allocate() argument
    104  mdev = __media_device_get(&udev->dev, module_name, owner);  in media_device_usb_allocate()
    120  struct module *owner)  in media_device_delete() argument
    126  if (mdi->owner != owner) {  in media_device_delete()
    127  module_put(mdi->owner);  in media_device_delete()
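
Note: here "owner" is the module behind a shared media_device. When a different module looks the device up, __media_device_get() pins the creating module with try_module_get() so it cannot be unloaded while the device is shared, and media_device_delete() drops that reference with module_put(). A rough, hypothetical sketch of the same pattern (names invented, locking and error handling trimmed):

    #include <linux/module.h>

    /* Hypothetical shared object whose lifetime spans two driver modules. */
    struct shared_dev {
        struct module *owner;       /* module that allocated the instance */
        int            refcount;
    };

    static struct shared_dev *shared_dev_get(struct shared_dev *sd, struct module *me)
    {
        /* Pin the owning module when a different module starts using the object. */
        if (me != sd->owner && !try_module_get(sd->owner))
            return NULL;            /* owner is unloading; refuse to share */
        sd->refcount++;
        return sd;
    }

    static void shared_dev_put(struct shared_dev *sd, struct module *me)
    {
        sd->refcount--;
        if (me != sd->owner)
            module_put(sd->owner);  /* release the reference taken above */
    }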

/linux/kernel/bpf/

  bpf_local_storage.c
     48  return map->ops->map_owner_storage_ptr(owner);  in owner_storage()
     79  mem_uncharge(smap, owner, smap->elem_size);  in bpf_selem_alloc()
     94  void *owner;  in bpf_selem_unlink_storage_nolock() local
     97  owner = local_storage->owner;  in bpf_selem_unlink_storage_nolock()
    104  mem_uncharge(smap, owner, smap->elem_size);  in bpf_selem_unlink_storage_nolock()
    110  local_storage->owner = NULL;  in bpf_selem_unlink_storage_nolock()
    261  int bpf_local_storage_alloc(void *owner,  in bpf_local_storage_alloc() argument
    269  err = mem_charge(smap, owner, sizeof(*storage));  in bpf_local_storage_alloc()
    282  storage->owner = owner;  in bpf_local_storage_alloc()
    319  mem_uncharge(smap, owner, sizeof(*storage));  in bpf_local_storage_alloc()
    [all …]

/linux/scripts/coccinelle/api/

  platform_no_drv_owner.cocci
      2  /// Remove .owner field if calls are used which set it automatically
     31  - .owner = THIS_MODULE,
     40  - .owner = THIS_MODULE,
     62  - .owner = THIS_MODULE,
     71  - .owner = THIS_MODULE,
     84  * .owner@j0 = THIS_MODULE,
     95  * .owner@j0 = THIS_MODULE,
    106  * .owner@j0 = THIS_MODULE,
    117  * .owner@j0 = THIS_MODULE,
    127  msg = "No need to set .owner here. The core will do it."
    [all …]
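
Note: the Coccinelle rule above strips ".owner = THIS_MODULE" from platform drivers because the registration path already records the owning module: module_platform_driver() and platform_driver_register() pass THIS_MODULE down to __platform_driver_register(), which fills the field in. A hypothetical driver the rule would leave alone (probe/remove callbacks omitted for brevity):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* Hypothetical driver name; callbacks omitted for brevity. */
    static struct platform_driver foo_driver = {
        .driver = {
            .name = "foo",
            /* no ".owner = THIS_MODULE" needed: the core sets it at registration */
        },
    };
    module_platform_driver(foo_driver);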

/linux/drivers/regulator/

  db8500-prcmu.c
    .owner = THIS_MODULE,  at lines 221, 231, 241, 251, 261, 271, 284, 294, 304, 315
    [all …]

  lp8788-ldo.c
    .owner = THIS_MODULE,  at lines 193, 206, 219, 232, 245, 258, 271, 284, 297, 310
    [all …]

  lm363x-regulator.c
    .owner = THIS_MODULE,  at lines 114, 127, 142, 157, 172, 188, 201, 216, 233, 246
    [all …]

  pca9450-regulator.c
    .owner = THIS_MODULE,  at lines 235, 264, 293, 318, 336, 354, 372, 390, 408, 426
    [all …]
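
Note: the four regulator drivers above (db8500-prcmu, lp8788-ldo, lm363x-regulator, pca9450-regulator) all set .owner = THIS_MODULE inside struct regulator_desc initializers. Unlike the platform_driver case handled by the Coccinelle rule earlier, these descriptors are typically declared const and the field is set explicitly in the initializer rather than by the core. A trimmed, hypothetical descriptor for illustration (foo_ldo_ops is an assumed, empty ops table):

    #include <linux/module.h>
    #include <linux/regulator/driver.h>

    static const struct regulator_ops foo_ldo_ops = {
        /* voltage get/set callbacks would go here */
    };

    /* Hypothetical LDO descriptor, heavily trimmed. */
    static const struct regulator_desc foo_ldo_desc = {
        .name  = "foo-ldo1",
        .id    = 0,
        .type  = REGULATOR_VOLTAGE,
        .ops   = &foo_ldo_ops,
        .owner = THIS_MODULE,    /* ties the descriptor to this driver module */
    };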

/linux/fs/xfs/libxfs/

  xfs_rmap.c
     38  uint64_t owner,  in xfs_rmap_lookup_le() argument
     60  uint64_t owner,  in xfs_rmap_lookup_eq() argument
    107  uint64_t owner,  in xfs_rmap_insert() argument
    149  uint64_t owner,  in xfs_rmap_delete() argument
    292  uint64_t owner,  in xfs_rmap_find_left_neighbor() argument
    370  uint64_t owner,  in xfs_rmap_lookup_le_range() argument
    415  uint64_t owner,  in xfs_rmap_free_check_owner() argument
    495  uint64_t owner;  in xfs_rmap_unmap() local
    729  uint64_t owner,  in xfs_rmap_is_mergeable() argument
   1002  uint64_t owner;  in xfs_rmap_convert() local
    [all …]

  xfs_rmap.h
     86  uint64_t *owner,  in xfs_owner_info_unpack() argument
     92  *owner = oinfo->oi_owner;  in xfs_owner_info_unpack()
    104  uint64_t owner,  in xfs_owner_info_pack() argument
    108  oinfo->oi_owner = owner;  in xfs_owner_info_pack()
    125  xfs_extlen_t len, uint64_t owner, uint64_t offset,
    128  xfs_extlen_t len, uint64_t owner, uint64_t offset,
    131  xfs_extlen_t len, uint64_t owner, uint64_t offset,
    176  xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
    178  xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
    188  uint64_t owner, uint64_t offset, unsigned int flags,
    [all …]
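
Note: in the reverse-mapping code, "owner" is the 64-bit value recorded for each mapped extent, and xfs_owner_info_pack()/xfs_owner_info_unpack() move an (owner, offset, flags) triple in and out of a small struct. A stand-alone sketch of that shape; only oi_owner is visible in the excerpt, the remaining field names are an assumption:

    #include <stdint.h>

    /* Simplified stand-in for struct xfs_owner_info; names beyond oi_owner assumed. */
    struct owner_info {
        uint64_t     oi_owner;    /* inode number or well-known metadata owner */
        uint64_t     oi_offset;   /* file offset covered by the extent, if any */
        unsigned int oi_flags;
    };

    static void owner_info_pack(struct owner_info *oinfo, uint64_t owner,
                                uint64_t offset, unsigned int flags)
    {
        oinfo->oi_owner  = owner;
        oinfo->oi_offset = offset;
        oinfo->oi_flags  = flags;
    }

    static void owner_info_unpack(const struct owner_info *oinfo, uint64_t *owner,
                                  uint64_t *offset, unsigned int *flags)
    {
        *owner  = oinfo->oi_owner;
        *offset = oinfo->oi_offset;
        *flags  = oinfo->oi_flags;
    }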

/linux/drivers/gpu/drm/nouveau/nvkm/falcon/

  base.c
     90  struct nvkm_device *device = falcon->owner->device;  in nvkm_falcon_enable()
     93  nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);  in nvkm_falcon_enable()
     96  nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);  in nvkm_falcon_enable()
    106  struct nvkm_device *device = falcon->owner->device;  in nvkm_falcon_disable()
    109  if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst))  in nvkm_falcon_disable()
    114  nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);  in nvkm_falcon_disable()
    140  const struct nvkm_subdev *subdev = falcon->owner;  in nvkm_falcon_oneinit()
    213  falcon->owner = subdev;  in nvkm_falcon_ctor()

/linux/Documentation/vm/

  page_owner.rst
      4  page owner: Tracking about who allocated each page
     10  page owner is for the tracking about who allocated each page.
     24  page owner can also be used for various purposes. For example, accurate
     26  each page. It is already implemented and activated if page owner is
     31  with page owner and page owner is disabled in runtime due to no enabling
     41  - Without page owner::
     46  - With page owner::
     55  page owner and turning it on if needed would be great option to debug
     64  Although it doesn't mean that they have the right owner information,
     79  2) Enable page owner: add "page_owner=on" to boot cmdline.
    [all …]

/linux/arch/csky/include/asm/

  spinlock.h
     29  while (lockval.tickets.next != lockval.tickets.owner)  in arch_spin_lock()
     30  lockval.tickets.owner = READ_ONCE(lock->tickets.owner);  in arch_spin_lock()
     66  WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);  in arch_spin_unlock()
     71  return lock.tickets.owner == lock.tickets.next;  in arch_spin_value_unlocked()
     83  return (tickets.next - tickets.owner) > 1;  in arch_spin_is_contended()
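
Note: this is a classic ticket spinlock: "next" hands out tickets to arriving lockers, "owner" is the ticket currently being served, arch_spin_lock() spins until its ticket comes up, and arch_spin_unlock() advances owner by one. A simplified user-space model with C11 atomics (the real code takes the ticket atomically in the architecture's fast path):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ticket_lock {
        atomic_uint next;    /* next ticket to hand out */
        atomic_uint owner;   /* ticket currently allowed into the critical section */
    };

    static void ticket_lock(struct ticket_lock *lock)
    {
        unsigned int ticket = atomic_fetch_add(&lock->next, 1);   /* take a ticket */

        while (atomic_load(&lock->owner) != ticket)
            ;   /* spin until our number is served */
    }

    static void ticket_unlock(struct ticket_lock *lock)
    {
        atomic_fetch_add(&lock->owner, 1);   /* serve the next ticket */
    }

    static bool ticket_is_contended(struct ticket_lock *lock)
    {
        /* mirrors arch_spin_is_contended(): more than one ticket outstanding */
        return atomic_load(&lock->next) - atomic_load(&lock->owner) > 1;
    }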

/linux/kernel/

  audit_tree.c
     35  struct audit_tree *owner;  member
    301  owner->root = new;  in replace_chunk()
    307  owner = old->owners[j].owner;  in replace_chunk()
    308  new->owners[i].owner = owner;  in replace_chunk()
    312  get_tree(owner);  in replace_chunk()
    327  struct audit_tree *owner = p->owner;  in remove_chunk_node() local
    331  owner->root = NULL;  in remove_chunk_node()
    334  p->owner = NULL;  in remove_chunk_node()
    335  put_tree(owner);  in remove_chunk_node()
    505  p->owner = tree;  in tag_chunk()
    [all …]

/linux/Documentation/locking/

  rt-mutex.rst
     19  A low priority owner of a rt-mutex inherits the priority of a higher
     21  boosted owner blocks on a rt-mutex itself it propagates the priority
     22  boosting to the owner of the other rt_mutex it gets blocked on. The
     34  rtmutex, only the top priority waiter is enqueued into the owner's
     37  got a signal), the priority of the owner task is readjusted. The
     46  The state of the rt-mutex is tracked via the owner field of the rt-mutex
     49  lock->owner holds the task_struct pointer of the owner. Bit 0 is used to
     53  owner bit0 Notes
     63  possible when bit 0 of lock->owner is 0.
     76  that anymore. The pending owner happens to be the top_waiter of a lock
    [all …]
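
Note: the encoding this document describes (and which the rtmutex.c hits near the top of this listing manipulate) stores the owning task_struct pointer in lock->owner, with bit 0 doubling as the "has waiters" flag; NULL with bit 0 clear means the lock is free. A small user-space model of that encoding (RT_MUTEX_HAS_WAITERS is bit 0, as in the kernel; "struct task" stands in for task_struct):

    #include <stdbool.h>

    #define RT_MUTEX_HAS_WAITERS 1UL     /* bit 0 of the owner word */

    struct task;                         /* stand-in for struct task_struct */

    static struct task *rt_mutex_owner(unsigned long owner_word)
    {
        return (struct task *)(owner_word & ~RT_MUTEX_HAS_WAITERS);
    }

    static bool rt_mutex_has_waiters(unsigned long owner_word)
    {
        return owner_word & RT_MUTEX_HAS_WAITERS;
    }

    static unsigned long rt_mutex_encode(struct task *owner, bool has_waiters)
    {
        return (unsigned long)owner | (has_waiters ? RT_MUTEX_HAS_WAITERS : 0);
    }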

/linux/drivers/iommu/

  exynos-iommu.c
     670  mutex_lock(&owner->rpm_lock);  in exynos_sysmmu_suspend()
     834  mutex_lock(&owner->rpm_lock);  in exynos_iommu_detach_device()
     851  owner->domain = NULL;  in exynos_iommu_detach_device()
     872  if (owner->domain)  in exynos_iommu_attach_device()
     875  mutex_lock(&owner->rpm_lock);  in exynos_iommu_attach_device()
     885  owner->domain = iommu_domain;  in exynos_iommu_attach_device()
    1257  if (owner->domain) {  in exynos_iommu_release_device()
    1261  WARN_ON(owner->domain !=  in exynos_iommu_release_device()
    1288  if (!owner) {  in exynos_iommu_of_xlate()
    1289  owner = kzalloc(sizeof(*owner), GFP_KERNEL);  in exynos_iommu_of_xlate()
    [all …]

/linux/include/linux/

  local_lock_internal.h
     14  struct task_struct *owner;  member
     25  .owner = NULL,
     30  DEBUG_LOCKS_WARN_ON(l->owner);  in local_lock_acquire()
     31  l->owner = current;  in local_lock_acquire()
     36  DEBUG_LOCKS_WARN_ON(l->owner != current);  in local_lock_release()
     37  l->owner = NULL;  in local_lock_release()
     43  l->owner = NULL;  in local_lock_debug_init()
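
Note: this is pure debug bookkeeping, the same pattern as the spinlock_debug.c hits earlier: record the current task as owner on acquire, and warn if a different task (or nobody) appears to hold the lock on release. A minimal user-space sketch of the idea, with asserts standing in for DEBUG_LOCKS_WARN_ON() and a thread-local marker standing in for "current":

    #include <assert.h>
    #include <stddef.h>

    struct dbg_lock {
        void *owner;    /* NULL when unlocked, "current task" while held */
    };

    static _Thread_local char this_task;    /* its address serves as a per-thread identity */

    static void dbg_lock_acquire(struct dbg_lock *l)
    {
        assert(l->owner == NULL);           /* DEBUG_LOCKS_WARN_ON(l->owner) */
        l->owner = &this_task;
    }

    static void dbg_lock_release(struct dbg_lock *l)
    {
        assert(l->owner == &this_task);     /* DEBUG_LOCKS_WARN_ON(l->owner != current) */
        l->owner = NULL;
    }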

/linux/arch/powerpc/include/asm/

  rheap.h
     23  const char *owner;  member
     43  const char *owner;  member
     67  const char *owner);
     70  extern unsigned long rh_alloc(rh_info_t * info, int size, const char *owner);
     74  const char *owner);
     90  extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner);

/linux/drivers/pinctrl/

  pinmux.c
    125  pin, desc->name, owner);  in pin_request()
    149  desc->mux_owner = owner;  in pin_request()
    191  pin, owner, status);  in pin_request()
    212  const char *owner;  in pin_free() local
    252  return owner;  in pin_free()
    266  const char *owner;  in pinmux_request_gpio() local
    271  if (!owner)  in pinmux_request_gpio()
    276  kfree(owner);  in pinmux_request_gpio()
    290  const char *owner;  in pinmux_free_gpio() local
    293  kfree(owner);  in pinmux_free_gpio()
    [all …]

/linux/drivers/dma/bestcomm/

  sram.c
     41  int bcom_sram_init(struct device_node *sram_node, char *owner)  in bcom_sram_init() argument
     51  "Already initialized !\n", owner);  in bcom_sram_init()
     58  "Couldn't allocate internal state !\n", owner);  in bcom_sram_init()
     66  "Invalid device node !\n", owner);  in bcom_sram_init()
     77  if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {  in bcom_sram_init()
     79  "Couldn't request region !\n", owner);  in bcom_sram_init()
     91  owner, (long)bcom_sram->base_phys, bcom_sram->size );  in bcom_sram_init()