Definitions of and references to smp_load_acquire() across the kernel tree:

/linux/include/asm-generic/barrier.h
  141: #ifndef smp_load_acquire
  142: #define smp_load_acquire(p) __smp_load_acquire(p)
  168: #ifndef smp_load_acquire
  169: #define smp_load_acquire(p) \

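The two guarded definitions are, in all likelihood, the CONFIG_SMP and !CONFIG_SMP branches of the generic header. Where an architecture supplies no __smp_load_acquire() of its own, the SMP fallback reduces to roughly the following shape, a plain load followed by a full barrier (a sketch; the real macro also type-checks its argument):

    #define smp_load_acquire(p)                                     \
    ({                                                              \
        typeof(*(p)) ___p1 = READ_ONCE(*(p));                       \
        smp_mb();   /* order the load before all later accesses */  \
        ___p1;                                                      \
    })

Architectures with a native load-acquire instruction (e.g. arm64's LDAR) override this with a single instruction instead.
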
/linux/drivers/infiniband/sw/rxe/rxe_queue.h
  102: prod = smp_load_acquire(&q->buf->producer_index);    (in queue_get_producer())
  109: prod = smp_load_acquire(&q->buf->producer_index);    (in queue_get_producer())
  130: cons = smp_load_acquire(&q->buf->consumer_index);    (in queue_get_consumer())
  137: cons = smp_load_acquire(&q->buf->consumer_index);    (in queue_get_consumer())

/linux/kernel/bpf/ringbuf.c
  248: cons_pos = smp_load_acquire(&rb->consumer_pos);    (in ringbuf_avail_data_sz())
  249: prod_pos = smp_load_acquire(&rb->producer_pos);    (in ringbuf_avail_data_sz())
  318: cons_pos = smp_load_acquire(&rb->consumer_pos);    (in __bpf_ringbuf_reserve())
  390: cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;    (in bpf_ringbuf_commit())
  464: return smp_load_acquire(&rb->consumer_pos);    (in BPF_CALL_2())
  466: return smp_load_acquire(&rb->producer_pos);    (in BPF_CALL_2())

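The ringbuf_avail_data_sz() hits show the two-index handshake used throughout these ring buffers: each position is advanced by its owner with a matching smp_store_release(), so the acquire loads here guarantee that the data behind a position update is visible before the size is computed. A sketch of the computation, assuming the positions grow monotonically:

    static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
    {
        unsigned long cons_pos, prod_pos;

        cons_pos = smp_load_acquire(&rb->consumer_pos);
        prod_pos = smp_load_acquire(&rb->producer_pos);
        return prod_pos - cons_pos;    /* bytes produced but not yet consumed */
    }
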
/linux/drivers/net/wireguard/queueing.c
   80: struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));    (in wg_prev_queue_dequeue())
   87: next = smp_load_acquire(&NEXT(next));    (in wg_prev_queue_dequeue())
   97: next = smp_load_acquire(&NEXT(tail));    (in wg_prev_queue_dequeue())

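wg_prev_queue_dequeue() walks an MPSC queue in which producers link a new node in with a release operation, so each acquire load of a next pointer above pairs with that release and makes the node's contents visible before the consumer touches them. A generic sketch of the idea (struct node and dequeue() are illustrative, not WireGuard's actual types):

    struct node {
        struct node *next;
        /* payload ... */
    };

    /* Single consumer: advance past tail once the producer's
     * release store to ->next becomes visible. */
    static struct node *dequeue(struct node **tail)
    {
        struct node *next = smp_load_acquire(&(*tail)->next);

        if (!next)
            return NULL;    /* nothing published yet */
        *tail = next;
        return next;
    }
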
/linux/tools/include/asm/barrier.h
   58: #ifndef smp_load_acquire
   59: # define smp_load_acquire(p) \

/linux/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus
    7: * smp_load_acquire() provide sufficient ordering for the message-passing
   24: r0 = smp_load_acquire(flag);

/linux/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
    9: * returns false and the second true, we know that the smp_load_acquire()
   29: r1 = smp_load_acquire(x);

/linux/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
    9: * the smp_load_acquire() executed before the lock was acquired (loosely
   28: r1 = smp_load_acquire(x);

/linux/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
   26: r0 = smp_load_acquire(y);
   35: r0 = smp_load_acquire(z);

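The three MP tests all check variants of the message-passing pattern: a payload write is published with smp_store_release(), and a reader that observes the flag through smp_load_acquire() is guaranteed to observe the payload. Written as plain C (names follow the litmus tests):

    int buf, flag;

    void writer(void)
    {
        WRITE_ONCE(buf, 1);
        smp_store_release(&flag, 1);    /* publish buf */
    }

    void reader(void)
    {
        int r0 = smp_load_acquire(&flag);
        int r1 = READ_ONCE(buf);
        /* forbidden outcome: r0 == 1 && r1 == 0 */
    }
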
/linux/include/linux/freelist.h
   81: struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);    (in freelist_try_get())
   89: head = smp_load_acquire(&list->head);    (in freelist_try_get())

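freelist_try_get() re-reads the list head with acquire on every retry so that the fields of a freshly pushed node are visible before the node is popped. A simplified sketch of that loop, deliberately ignoring the reference counting the real freelist layers on top to defeat ABA:

    static struct freelist_node *try_get(struct freelist_head *list)
    {
        struct freelist_node *head = smp_load_acquire(&list->head);

        while (head) {
            struct freelist_node *next = READ_ONCE(head->next);

            if (try_cmpxchg(&list->head, &head, next))
                return head;    /* popped */
            /* lost a race: re-read the new head with acquire */
            head = smp_load_acquire(&list->head);
        }
        return NULL;
    }
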
/linux/drivers/net/netdevsim/bus.c
   69: if (!smp_load_acquire(&nsim_bus_dev->init))    (in new_port_store())
   99: if (!smp_load_acquire(&nsim_bus_dev->init))    (in del_port_store())
  176: if (!smp_load_acquire(&nsim_bus_enable)) {    (in new_device_store())
  225: if (!smp_load_acquire(&nsim_bus_enable)) {    (in del_device_store())

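The netdevsim hits (and, below, stackdepot, fscrypt and unix_bpf) are the simplest use of the primitive: gate on an "initialized" flag or pointer that the writer publishes with smp_store_release() only after setup is complete, so a reader that sees the flag set also sees everything setup wrote. A minimal sketch of the pairing (setup_device() is a stand-in):

    static bool init_done;

    void writer_side(void)
    {
        setup_device();                         /* all initialization first */
        smp_store_release(&init_done, true);    /* then publish */
    }

    int reader_side(void)
    {
        if (!smp_load_acquire(&init_done))
            return -EBUSY;
        /* everything setup_device() wrote is visible here */
        return 0;
    }
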
/linux/lib/stackdepot.c
   83: if (smp_load_acquire(&next_slab_inited))    (in init_stack_slab())
  337: found = find_stack(smp_load_acquire(bucket), entries,    (in __stack_depot_save())
  350: if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {    (in __stack_depot_save())

/linux/tools/lib/bpf/ringbuf.c
  214: cons_pos = smp_load_acquire(r->consumer_pos);    (in ringbuf_process_ring())
  217: prod_pos = smp_load_acquire(r->producer_pos);    (in ringbuf_process_ring())
  220: len = smp_load_acquire(len_ptr);    (in ringbuf_process_ring())

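On the user-space side the third load is the interesting one: the kernel commits a record by release-storing its length header with BPF_RINGBUF_BUSY_BIT cleared, so acquiring the length tells the consumer the payload is complete before a byte of it is read. Roughly the shape of the loop (process_sample() stands in for the user callback; the rounding mirrors the 8-byte record alignment):

    static unsigned long consume(void *data, unsigned long mask,
                                 unsigned long cons_pos, unsigned long prod_pos)
    {
        while (cons_pos < prod_pos) {
            int *len_ptr = data + (cons_pos & mask);
            int len = smp_load_acquire(len_ptr);

            if (len & BPF_RINGBUF_BUSY_BIT)
                break;    /* producer still writing this record */
            cons_pos += (len + BPF_RINGBUF_HDR_SZ + 7) & ~7UL;
            if (!(len & BPF_RINGBUF_DISCARD_BIT))
                process_sample((void *)len_ptr + BPF_RINGBUF_HDR_SZ, len);
        }
        return cons_pos;    /* caller publishes this with smp_store_release() */
    }
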
/linux/fs/crypto/fscrypt_private.h
  369: return smp_load_acquire(&prep_key->blk_key) != NULL;    (in fscrypt_is_key_prepared())
  370: return smp_load_acquire(&prep_key->tfm) != NULL;    (in fscrypt_is_key_prepared())
  404: return smp_load_acquire(&prep_key->tfm) != NULL;    (in fscrypt_is_key_prepared())

/linux/drivers/media/dvb-core/dvb_ringbuffer.c
   60: return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));    (in dvb_ringbuffer_empty())
   90: avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;    (in dvb_ringbuffer_avail())
  106: smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));    (in dvb_ringbuffer_flush())

/linux/net/unix/unix_bpf.c
  120: if (unlikely(ops != smp_load_acquire(&unix_dgram_prot_saved))) {    (in unix_dgram_bpf_check_needs_rebuild())
  132: if (unlikely(ops != smp_load_acquire(&unix_stream_prot_saved))) {    (in unix_stream_bpf_check_needs_rebuild())

/linux/arch/arm/include/asm/mcs_spinlock.h
   13: while (!(smp_load_acquire(lock))) \

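The arm hit is the contended MCS slow path: spin until the previous lock holder release-stores a nonzero value into this waiter's flag, with acquire on the final load ordering the critical section after the unlock. In isolation (cpu_relax() stands in for the arch-specific wait, which on arm is wfe()):

    /* lock points at this waiter's flag, set by the previous
     * holder with a release store when it unlocks */
    while (!smp_load_acquire(lock))
        cpu_relax();
    /* critical section: ordered after the releasing store */
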
/linux/scripts/atomic/fallbacks/read_acquire
    5: return smp_load_acquire(&(v)->counter);

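This one-line template is what the atomic fallback scripts expand into the *_read_acquire() family when an architecture provides no specialized version; for atomic_t the generated function is essentially:

    static __always_inline int
    atomic_read_acquire(const atomic_t *v)
    {
        return smp_load_acquire(&(v)->counter);
    }
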
/linux/tools/arch/ia64/include/asm/barrier.h
   52: #define smp_load_acquire(p) \

/linux/tools/arch/s390/include/asm/barrier.h
   37: #define smp_load_acquire(p) \

/linux/tools/memory-model/Documentation/recipes.txt
  216: Use of smp_store_release() and smp_load_acquire() is one way to force
  228: r0 = smp_load_acquire(&y);
  233: store, while the smp_load_acquire macro orders the load against any
  246: use of smp_store_release() and smp_load_acquire(), except that both
  291: and to use smp_load_acquire() instead of smp_rmb(). However, the older
  421: r0 = smp_load_acquire(y);
  427: r1 = smp_load_acquire(z);
  435: example, ordering would still be preserved if CPU1()'s smp_load_acquire()
  450: r0 = smp_load_acquire(y);

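The 421-450 block quoted above is the release-acquire chain recipe (the ISA2 shape from the litmus tests): each acquire reads from the preceding release, and because release-acquire chains are cumulative, the final CPU is ordered after the first CPU's plain write. In outline:

    int x, y, z;

    void cpu0(void)
    {
        WRITE_ONCE(x, 1);
        smp_store_release(&y, 1);
    }

    void cpu1(void)
    {
        int r0 = smp_load_acquire(&y);    /* reads from cpu0's release */
        smp_store_release(&z, 1);
    }

    void cpu2(void)
    {
        int r1 = smp_load_acquire(&z);    /* reads from cpu1's release */
        int r2 = READ_ONCE(x);
        /* forbidden: r0 == 1 && r1 == 1 && r2 == 0 */
    }
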
/linux/tools/arch/powerpc/include/asm/barrier.h
   39: #define smp_load_acquire(p) \

/linux/tools/arch/sparc/include/asm/barrier_64.h
   49: #define smp_load_acquire(p) \

/linux/tools/include/linux/ring_buffer.h
   59: return smp_load_acquire(&base->data_head);    (in ring_buffer_read_head())

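ring_buffer_read_head() pairs with ring_buffer_write_tail() from the same header: a perf mmap consumer acquires data_head before parsing records and release-stores data_tail afterwards so the kernel may reuse the space. A typical consumer loop (consume_event() is a stand-in for record parsing that returns the record size):

    static u64 drain(struct perf_event_mmap_page *base, u64 tail)
    {
        u64 head = ring_buffer_read_head(base);    /* acquire */

        while (tail < head)
            tail += consume_event(base, tail);
        ring_buffer_write_tail(base, tail);        /* release */
        return tail;
    }
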
/linux/tools/arch/x86/include/asm/barrier.h
   39: #define smp_load_acquire(p) \

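On a strongly ordered architecture such as x86, ordinary loads already have acquire semantics, so the definition at line 39 plausibly needs no hardware barrier at all, only a compiler barrier to keep the load in place; a sketch of that shape:

    #define smp_load_acquire(p)                     \
    ({                                              \
        typeof(*(p)) ___p1 = READ_ONCE(*(p));       \
        barrier();    /* compiler barrier only */   \
        ___p1;                                      \
    })
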