/linux/kernel/events/

  ring_buffer.c
      38  struct perf_buffer *rb = handle->rb;  in perf_output_get_handle() local
      52  struct perf_buffer *rb = handle->rb;  in perf_output_put_handle() local
     170  rb = rcu_dereference(event->rb);  in __perf_output_begin()
     180  handle->rb = rb;  in __perf_output_begin()
     231  local_add(rb->watermark, &rb->wakeup);  in __perf_output_begin()
     410  handle->rb = rb;  in perf_aux_output_begin()
     459  if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {  in rb_need_aux_wakeup()
     460  rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);  in rb_need_aux_wakeup()
     480  struct perf_buffer *rb = handle->rb;  in perf_aux_output_end() local
     537  struct perf_buffer *rb = handle->rb;  in perf_aux_output_skip() local
    [all …]
  internal.h
      65  struct perf_buffer *rb;  in rb_free_rcu() local
      68  rb_free(rb);  in rb_free_rcu()
      73  if (!pause && rb->nr_pages)  in rb_toggle_paused()
      74  rb->paused = 0;  in rb_toggle_paused()
      76  rb->paused = 1;  in rb_toggle_paused()
      90  return !!rb->aux_nr_pages;  in rb_has_aux()
     108  return rb->page_order;  in page_order()
     121  return rb->nr_pages << (PAGE_SHIFT + page_order(rb));  in perf_data_size()
     126  return rb->aux_nr_pages << PAGE_SHIFT;  in perf_aux_size()
     144  struct perf_buffer *rb = handle->rb; \
    [all …]
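The internal.h hits show how perf derives buffer sizes from page counts: the data area spans nr_pages chunks of 2^page_order pages each, while the AUX area is counted in single pages. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages; the struct here is an illustrative stand-in, not the kernel's perf_buffer:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed: 4 KiB pages */

    /* Illustrative stand-in for the fields perf_data_size()/perf_aux_size() read. */
    struct buf_geometry {
            unsigned long nr_pages;     /* data "pages" */
            unsigned long page_order;   /* each data page is 2^order real pages */
            unsigned long aux_nr_pages; /* AUX pages, always order 0 */
    };

    static unsigned long data_size(const struct buf_geometry *rb)
    {
            /* mirrors perf_data_size(): nr_pages << (PAGE_SHIFT + page_order) */
            return rb->nr_pages << (PAGE_SHIFT + rb->page_order);
    }

    static unsigned long aux_size(const struct buf_geometry *rb)
    {
            /* mirrors perf_aux_size(): aux_nr_pages << PAGE_SHIFT */
            return rb->aux_nr_pages << PAGE_SHIFT;
    }

    int main(void)
    {
            struct buf_geometry rb = { 8, 0, 16 };

            /* prints "data: 32768 bytes, aux: 65536 bytes" */
            printf("data: %lu bytes, aux: %lu bytes\n",
                   data_size(&rb), aux_size(&rb));
            return 0;
    }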
/linux/drivers/scsi/bfa/

  bfa_ioc_ct.c
     185  void __iomem *rb;  in bfa_ioc_ct_reg_init() local
     188  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct_reg_init()
     244  void __iomem *rb;  in bfa_ioc_ct2_reg_init() local
     247  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct2_reg_init()
     597  writel(0, (rb + OP_MODE));  in bfa_ioc_ct_pll_init()
     821  bfa_ioc_ct2_sclk_init(rb);  in bfa_ioc_ct2_clk_reset()
     822  bfa_ioc_ct2_lclk_init(rb);  in bfa_ioc_ct2_clk_reset()
     898  bfa_ioc_ct2_clk_reset(rb);  in bfa_ioc_ct2_pll_init()
     901  bfa_ioc_ct2_mac_reset(rb);  in bfa_ioc_ct2_pll_init()
     903  bfa_ioc_ct2_clk_reset(rb);  in bfa_ioc_ct2_pll_init()
    [all …]
  bfa_ioc_cb.c
     138  void __iomem *rb;  in bfa_ioc_cb_reg_init() local
     141  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_cb_reg_init()
     186  ioc->ioc_regs.err_set = (rb + ERR_SET_REG);  in bfa_ioc_cb_reg_init()
     369  join_bits = readl(rb + BFA_IOC0_STATE_REG) &  in bfa_ioc_cb_pll_init()
     372  join_bits = readl(rb + BFA_IOC1_STATE_REG) &  in bfa_ioc_cb_pll_init()
     375  writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));  in bfa_ioc_cb_pll_init()
     376  writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));  in bfa_ioc_cb_pll_init()
     383  rb + APP_PLL_SCLK_CTL_REG);  in bfa_ioc_cb_pll_init()
     386  rb + APP_PLL_LCLK_CTL_REG);  in bfa_ioc_cb_pll_init()
     391  rb + APP_PLL_SCLK_CTL_REG);  in bfa_ioc_cb_pll_init()
    [all …]
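In both bfa drivers, rb is a void __iomem * register window returned by bfa_ioc_bar0(), and every register is addressed as that base plus an offset through readl()/writel(). A generic sketch of the access pattern, assuming hypothetical register offsets; this is not the driver's actual register map:

    #include <linux/io.h>
    #include <linux/types.h>

    #define CTRL_REG    0x0010 /* hypothetical offsets, for illustration only */
    #define STATUS_REG  0x0014
    #define CTRL_ENABLE 0x1

    /* Read-modify-write a control register relative to an ioremapped base. */
    static void ioc_enable(void __iomem *rb)
    {
            u32 ctrl;

            ctrl = readl(rb + CTRL_REG);               /* MMIO read at base + offset */
            writel(ctrl | CTRL_ENABLE, rb + CTRL_REG); /* MMIO write back */

            /* Read a second register to observe the device's new state. */
            ctrl = readl(rb + STATUS_REG);
            (void)ctrl;
    }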
/linux/drivers/net/ethernet/brocade/bna/

  bfa_ioc_ct.c
     251  void __iomem *rb;  in bfa_ioc_ct_reg_init() local
     254  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct_reg_init()
     310  void __iomem *rb;  in bfa_ioc_ct2_reg_init() local
     313  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct2_reg_init()
     616  writel(0, (rb + OP_MODE));  in bfa_ioc_ct_pll_init()
     620  (rb + ETH_MAC_SER_REG));  in bfa_ioc_ct_pll_init()
     624  (rb + ETH_MAC_SER_REG));  in bfa_ioc_ct_pll_init()
     789  bfa_ioc_ct2_sclk_init(rb);  in bfa_ioc_ct2_mac_reset()
     790  bfa_ioc_ct2_lclk_init(rb);  in bfa_ioc_ct2_mac_reset()
     888  bfa_ioc_ct2_mac_reset(rb);  in bfa_ioc_ct2_pll_init()
    [all …]
/linux/tools/lib/bpf/

  ringbuf.c
      81  tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));  in ring_buffer__add()
      86  tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));  in ring_buffer__add()
      91  r = &rb->rings[rb->ring_cnt];  in ring_buffer__add()
     126  e = &rb->events[rb->ring_cnt];  in ring_buffer__add()
     147  if (!rb)  in ring_buffer__free()
     151  ringbuf_unmap_ring(rb, &rb->rings[i]);  in ring_buffer__free()
     157  free(rb);  in ring_buffer__free()
     170  rb = calloc(1, sizeof(*rb));  in ring_buffer__new()
     171  if (!rb)  in ring_buffer__new()
     187  return rb;  in ring_buffer__new()
    [all …]
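These hits are the internals of libbpf's userspace consumer: ring_buffer__new() allocates the manager, ring_buffer__add() maps one more BPF_MAP_TYPE_RINGBUF map into it, and ring_buffer__free() unmaps everything. A minimal consumer sketch against that public API; the sample handling is illustrative, and map_fd is assumed to refer to a ringbuf map:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    /* Invoked once per record submitted by the BPF program. */
    static int handle_sample(void *ctx, void *data, size_t len)
    {
            printf("got %zu-byte sample\n", len);
            return 0; /* a negative return aborts the poll/consume loop */
    }

    int consume_events(int map_fd)
    {
            struct ring_buffer *rb;
            int err;

            rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
            if (!rb)
                    return -1;

            /* Poll until an error; the return value is records consumed. */
            while ((err = ring_buffer__poll(rb, 100 /* timeout, ms */)) >= 0)
                    ;

            ring_buffer__free(rb);
            return err;
    }

ring_buffer__add() can attach further ringbuf maps to the same manager, so a single poll loop serves all of them.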
/linux/kernel/bpf/

  ringbuf.c
     108  if (rb) {  in bpf_ringbuf_area_alloc()
     110  rb->pages = pages;  in bpf_ringbuf_area_alloc()
     112  return rb;  in bpf_ringbuf_area_alloc()
     134  if (!rb)  in bpf_ringbuf_alloc()
     142  rb->consumer_pos = 0;  in bpf_ringbuf_alloc()
     145  return rb;  in bpf_ringbuf_alloc()
     173  if (!rb_map->rb) {  in ringbuf_map_alloc()
     189  vunmap(rb);  in bpf_ringbuf_free()
     338  hdr = (void *)rb->data + (prod_pos & rb->mask);  in __bpf_ringbuf_reserve()
     390  cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;  in bpf_ringbuf_commit()
    [all …]
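__bpf_ringbuf_reserve() and bpf_ringbuf_commit() are the kernel halves of the reserve/submit helpers that BPF programs call. A minimal producer sketch in BPF C; the map name, size, and event layout are assumptions for illustration:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct event {
            __u32 pid;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 256 * 1024); /* power-of-2 multiple of page size */
    } events SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_execve")
    int trace_execve(void *ctx)
    {
            struct event *e;

            /* Reserve space in the ring; NULL means the buffer is full. */
            e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
            if (!e)
                    return 0;

            e->pid = bpf_get_current_pid_tgid() >> 32;

            /* Commit: the record becomes visible to the userspace consumer. */
            bpf_ringbuf_submit(e, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";

Reserve and submit are separate steps so the record can be filled in place, with no copy at commit time.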
/linux/drivers/hid/intel-ish-hid/ishtp/

  client-buffers.c
      29  if (!rb) {  in ishtp_cl_alloc_rx_ring()
     109  kfree(rb);  in ishtp_cl_free_rx_ring()
     119  kfree(rb);  in ishtp_cl_free_rx_ring()
     171  kfree(rb);  in ishtp_io_rb_free()
     187  if (!rb)  in ishtp_io_rb_init()
     191  rb->cl = cl;  in ishtp_io_rb_init()
     193  return rb;  in ishtp_io_rb_init()
     207  if (!rb)  in ishtp_io_rb_alloc_buf()
     235  if (!rb || !rb->cl)  in ishtp_cl_io_rb_recycle()
     291  if (rb)  in ishtp_cl_rx_get_rb()
    [all …]
  client.c
      50  if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {  in ishtp_read_list_flush()
     477  rb = NULL;  in ishtp_cl_read_start()
     485  rb->cl = cl;  in ishtp_cl_read_start()
     486  rb->buf_idx = 0;  in ishtp_cl_read_start()
     851  cl = rb->cl;  in recv_ishtp_cl_msg()
     858  if (rb->buffer.size == 0 || rb->buffer.data == NULL) {  in recv_ishtp_cl_msg()
     874  if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {  in recv_ishtp_cl_msg()
     886  buffer = rb->buffer.data + rb->buf_idx;  in recv_ishtp_cl_msg()
     971  cl = rb->cl;  in recv_ishtp_cl_msg_dma()
     980  if (rb->buffer.size == 0 || rb->buffer.data == NULL) {  in recv_ishtp_cl_msg_dma()
    [all …]
/linux/lib/

  rbtree_test.c
      20  struct rb_node rb;  member
     166  for (count = 0; rb; rb = rb_parent(rb))  in black_path_count()
     183  struct rb_node *rb;  in check_postorder() local
     185  for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))  in check_postorder()
     193  struct rb_node *rb;  in check() local
     197  for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {  in check()
     198  struct test_node *node = rb_entry(rb, struct test_node, rb);  in check()
     201  (!rb_parent(rb) || is_red(rb_parent(rb))));  in check()
     205  WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&  in check()
     223  for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {  in check_augmented()
    [all …]
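rbtree_test.c exercises the kernel rbtree conventions that recur throughout this list: a struct rb_node embedded as a member, rb_entry() to recover the containing structure, and rb_first()/rb_next() for in-order walks. A minimal sketch of the matching insert side using the standard kernel idiom; the struct and key are illustrative:

    #include <linux/rbtree.h>

    struct test_node {
            u32 key;
            struct rb_node rb; /* embedded node, as in rbtree_test.c */
    };

    /* Standard kernel rbtree insert: walk to a leaf, link, rebalance. */
    static void test_insert(struct rb_root *root, struct test_node *new)
    {
            struct rb_node **link = &root->rb_node, *parent = NULL;

            while (*link) {
                    struct test_node *cur =
                            rb_entry(*link, struct test_node, rb);

                    parent = *link;
                    if (new->key < cur->key)
                            link = &(*link)->rb_left;
                    else
                            link = &(*link)->rb_right;
            }

            rb_link_node(&new->rb, parent, link); /* attach as a red leaf */
            rb_insert_color(&new->rb, root);      /* restore red-black invariants */
    }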
/linux/kernel/printk/

  printk_ringbuffer.c
     917  if (!desc_push_tail(rb, id_prev_wrap))  in desc_reserve()
    1373  e->rb = rb;  in prb_reserve_in_last()
    1499  if (!desc_reserve(rb, &id)) {  in prb_reserve()
    1501  atomic_long_inc(&rb->fail);  in prb_reserve()
    1521  e->rb = rb;  in prb_reserve()
    1881  tail_seq = prb_first_seq(rb);  in _prb_read_valid()
    2042  rb->desc_ring.count_bits = descbits;  in prb_init()
    2043  rb->desc_ring.descs = descs;  in prb_init()
    2044  rb->desc_ring.infos = infos;  in prb_init()
    2049  rb->text_data_ring.data = text_buf;  in prb_init()
    [all …]
  printk_ringbuffer.h
     105  struct printk_ringbuffer *rb;  member
     303  bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
     310  void prb_init(struct printk_ringbuffer *rb,
     353  #define prb_for_each_record(from, rb, s, r) \  argument
     354  for ((s) = from; prb_read_valid(rb, s, r); (s) = (r)->info->seq + 1)
     371  #define prb_for_each_info(from, rb, s, i, lc) \  argument
     372  for ((s) = from; prb_read_valid_info(rb, s, i, lc); (s) = (i)->seq + 1)
     374  bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
     376  bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
     379  u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
    [all …]
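The header shows the lock-free reader API: prb_read_valid() copies out the record at a given sequence number, and prb_for_each_record() loops it, advancing to info->seq + 1 each pass. A reader sketch built from the declarations above; prb_rec_init_rd() and the field names follow recent kernels and may differ across versions:

    /* Sketch of a printk_ringbuffer reader, using the macros declared above. */
    static void dump_records(struct printk_ringbuffer *rb)
    {
            struct printk_info info;
            struct printk_record r;
            char text[1024];
            u64 seq;

            /* Bind the record to caller-owned storage for the copied-out text. */
            prb_rec_init_rd(&r, &info, text, sizeof(text));

            /* Walk every currently valid record, oldest first. */
            prb_for_each_record(prb_first_valid_seq(rb), rb, seq, &r)
                    pr_info("seq=%llu len=%u\n", info.seq, r.info->text_len);
    }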
/linux/drivers/gpu/drm/amd/display/dmub/inc/

  dmub_cmd.h
    2692  return (rb->wrpt == rb->rptr);  in dmub_rb_empty()
    2706  if (rb->wrpt >= rb->rptr)  in dmub_rb_full()
    2707  data_count = rb->wrpt - rb->rptr;  in dmub_rb_full()
    2709  data_count = rb->capacity - (rb->rptr - rb->wrpt);  in dmub_rb_full()
    2738  if (rb->wrpt >= rb->capacity)  in dmub_rb_push_front()
    2739  rb->wrpt %= rb->capacity;  in dmub_rb_push_front()
    2755  uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;  in dmub_rb_out_push_front()
    2765  if (rb->wrpt >= rb->capacity)  in dmub_rb_out_push_front()
    2766  rb->wrpt %= rb->capacity;  in dmub_rb_out_push_front()
    2871  if (rb->rptr >= rb->capacity)  in dmub_rb_pop_front()
    [all …]
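The DMUB ring keeps raw byte offsets (rptr/wrpt) into a fixed capacity, wrapping each one with a modulo after it advances; equal offsets mean empty, so the ring must keep one command slot of headroom to tell full from empty. A standalone sketch of the same occupancy arithmetic; the names mirror the snippet, and the entry size is an assumption:

    #include <stdint.h>
    #include <stdbool.h>

    #define ENTRY_SIZE 64 /* assumed fixed command size */

    struct byte_ring {
            uint32_t rptr;     /* read offset, bytes */
            uint32_t wrpt;     /* write offset, bytes */
            uint32_t capacity; /* total bytes, a multiple of ENTRY_SIZE */
    };

    static bool ring_empty(const struct byte_ring *rb)
    {
            return rb->wrpt == rb->rptr; /* same offsets: nothing queued */
    }

    /* Bytes currently queued, accounting for wrap-around. */
    static uint32_t ring_used(const struct byte_ring *rb)
    {
            if (rb->wrpt >= rb->rptr)
                    return rb->wrpt - rb->rptr;
            return rb->capacity - (rb->rptr - rb->wrpt);
    }

    /* Full when one more entry would make wrpt catch up with rptr. */
    static bool ring_full(const struct byte_ring *rb)
    {
            return ring_used(rb) >= rb->capacity - ENTRY_SIZE;
    }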
/linux/drivers/gpu/drm/

  drm_mm.c
     175  rb = &hole_node->rb;  in drm_mm_interval_tree_add_node()
     177  parent = rb_entry(rb, struct drm_mm_node, rb);  in drm_mm_interval_tree_add_node()
     182  rb = rb_parent(rb);  in drm_mm_interval_tree_add_node()
     185  rb = &hole_node->rb;  in drm_mm_interval_tree_add_node()
     189  rb = NULL;  in drm_mm_interval_tree_add_node()
     195  rb = *link;  in drm_mm_interval_tree_add_node()
     196  parent = rb_entry(rb, struct drm_mm_node, rb);  in drm_mm_interval_tree_add_node()
     207  rb_link_node(&node->rb, rb, link);  in drm_mm_interval_tree_add_node()
     316  rb = rb->rb_right;  in best_hole()
     318  rb = rb->rb_left;  in best_hole()
    [all …]
  drm_prime.c
     115  rb = NULL;  in drm_prime_add_buf_handle()
     120  rb = *p;  in drm_prime_add_buf_handle()
     130  rb = NULL;  in drm_prime_add_buf_handle()
     135  rb = *p;  in drm_prime_add_buf_handle()
     154  while (rb) {  in drm_prime_lookup_buf_by_handle()
     161  rb = rb->rb_right;  in drm_prime_lookup_buf_by_handle()
     163  rb = rb->rb_left;  in drm_prime_lookup_buf_by_handle()
     184  rb = rb->rb_right;  in drm_prime_lookup_buf_handle()
     186  rb = rb->rb_left;  in drm_prime_lookup_buf_handle()
     211  rb = rb->rb_right;  in drm_prime_remove_buf_handle_locked()
    [all …]
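drm_prime keeps two rbtrees per file (dma_buf to handle and handle to dma_buf) and searches them with the usual descend-by-comparison walk. A generic lookup sketch in the same style, complementing the insertion idiom shown under /linux/lib/ above; the struct and field names are illustrative:

    #include <linux/rbtree.h>

    struct handle_node {
            u32 handle;
            struct rb_node rb;
    };

    /* Descend from the root: right for larger keys, left for smaller. */
    static struct handle_node *lookup_handle(struct rb_root *root, u32 handle)
    {
            struct rb_node *rb = root->rb_node;

            while (rb) {
                    struct handle_node *node =
                            rb_entry(rb, struct handle_node, rb);

                    if (handle > node->handle)
                            rb = rb->rb_right;
                    else if (handle < node->handle)
                            rb = rb->rb_left;
                    else
                            return node; /* exact match */
            }
            return NULL; /* not present */
    }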
/linux/mm/

  interval_tree.c
      23  INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
      38  if (!prev->shared.rb.rb_right) {  in vma_interval_tree_insert_after()
      40  link = &prev->shared.rb.rb_right;  in vma_interval_tree_insert_after()
      42  parent = rb_entry(prev->shared.rb.rb_right,  in vma_interval_tree_insert_after()
      43  struct vm_area_struct, shared.rb);  in vma_interval_tree_insert_after()
      46  while (parent->shared.rb.rb_left) {  in vma_interval_tree_insert_after()
      47  parent = rb_entry(parent->shared.rb.rb_left,  in vma_interval_tree_insert_after()
      48  struct vm_area_struct, shared.rb);  in vma_interval_tree_insert_after()
      52  link = &parent->shared.rb.rb_left;  in vma_interval_tree_insert_after()
      56  rb_link_node(&node->shared.rb, &parent->shared.rb, link);  in vma_interval_tree_insert_after()
    [all …]
/linux/drivers/target/iscsi/

  iscsi_target_configfs.c
      52  return rb;  in lio_target_np_driver_show()
     520  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
     526  rb += sprintf(page+rb, "Session State: ");  in lio_target_nacl_info_show()
     569  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
     573  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
     577  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
     581  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
     585  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
     589  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
     593  rb += sprintf(page+rb,  in lio_target_nacl_info_show()
    [all …]
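Here rb is not a ring buffer at all but a running byte count: each sprintf() appends at page+rb, and the accumulated total becomes the show() return value. A sketch of the pattern, using scnprintf() against the PAGE_SIZE bound as a safer variant; the attribute content is illustrative:

    /* Sketch of a configfs show() accumulating output into "page". */
    static ssize_t example_info_show(struct config_item *item, char *page)
    {
            ssize_t rb = 0;

            /* scnprintf() returns bytes written and never exceeds the bound. */
            rb += scnprintf(page + rb, PAGE_SIZE - rb, "Session State: ");
            rb += scnprintf(page + rb, PAGE_SIZE - rb, "LOGGED_IN\n");
            rb += scnprintf(page + rb, PAGE_SIZE - rb, "Cmds in queue: %d\n", 0);

            return rb; /* total bytes produced, as in lio_target_nacl_info_show() */
    }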
/linux/fs/jffs2/

  nodelist.h
     230  struct rb_node rb;  member
     271  struct rb_node rb;  member
     347  #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
     348  #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
     350  #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
     351  #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
     354  #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
     355  #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
     356  #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
     357  #define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
    [all …]
/linux/net/sunrpc/xprtrdma/

  verbs.c
     882  if (!rb)  in rpcrdma_req_setup()
    1263  rb = kmalloc(sizeof(*rb), flags);  in rpcrdma_regbuf_alloc()
    1264  if (!rb)  in rpcrdma_regbuf_alloc()
    1268  kfree(rb);  in rpcrdma_regbuf_alloc()
    1275  return rb;  in rpcrdma_regbuf_alloc()
    1318  rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),  in __rpcrdma_regbuf_dma_map()
    1319  rdmab_length(rb), rb->rg_direction);  in __rpcrdma_regbuf_dma_map()
    1332  if (!rb)  in rpcrdma_regbuf_dma_unmap()
    1338  ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),  in rpcrdma_regbuf_dma_unmap()
    1346  if (rb)  in rpcrdma_regbuf_free()
    [all …]
/linux/arch/powerpc/mm/book3s64/

  radix_tlb.c
      32  unsigned long rb;  in tlbiel_radix_set_isa300() local
     103  unsigned long rb,rs,prs,r;  in __tlbiel_pid() local
     105  rb = PPC_BIT(53); /* IS = 1 */  in __tlbiel_pid()
     118  unsigned long rb,rs,prs,r;  in __tlbie_pid() local
     120  rb = PPC_BIT(53); /* IS = 1 */  in __tlbie_pid()
     134  unsigned long rb, rs, prs, r;  in __tlbie_pid_lpid() local
     147  unsigned long rb,rs,prs,r;  in __tlbie_lpid() local
     161  unsigned long rb,rs,prs,r;  in __tlbie_lpid_guest() local
     176  unsigned long rb,rs,prs,r;  in __tlbiel_va() local
     192  unsigned long rb,rs,prs,r;  in __tlbie_va() local
    [all …]
/linux/drivers/xen/xenbus/

  xenbus_dev_frontend.c
     151  unsigned sz = min((unsigned)len - i, rb->len - rb->cons);  in xenbus_file_read()
     153  ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);  in xenbus_file_read()
     156  rb->cons += sz - ret;  in xenbus_file_read()
     165  if (rb->cons == rb->len) {  in xenbus_file_read()
     167  kfree(rb);  in xenbus_file_read()
     198  rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);  in queue_reply()
     199  if (rb == NULL)  in queue_reply()
     202  rb->cons = 0;  in queue_reply()
     203  rb->len = len;  in queue_reply()
     222  kfree(rb);  in queue_cleanup()
    [all …]
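xenbus_file_read() drains a list of queued reply buffers: each pass copies at most the unread remainder of the current buffer, advances the consumer offset by what copy_to_user() actually transferred, and frees the buffer once fully consumed. A sketch of one drain step; the read_buffer layout mirrors the snippet, and list handling and locking are omitted:

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/minmax.h>

    struct read_buffer {
            unsigned int cons; /* bytes already handed to userspace */
            unsigned int len;  /* total payload length */
            char msg[];        /* payload, allocated inline */
    };

    /* Copy one chunk of rb to ubuf + i; returns bytes copied or -EFAULT. */
    static ssize_t drain_one(struct read_buffer *rb, char __user *ubuf,
                             size_t len, size_t i)
    {
            unsigned int sz = min_t(unsigned int, len - i, rb->len - rb->cons);
            unsigned long ret;

            ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
            rb->cons += sz - ret; /* count only what actually crossed */
            if (ret)
                    return -EFAULT;
            if (rb->cons == rb->len)
                    kfree(rb); /* fully drained; caller must forget rb */
            return sz;
    }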
/linux/tools/testing/selftests/bpf/benchs/

  run_bench_ringbufs.sh
       8  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
      13  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
      18  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
      41  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
/linux/arch/arm/lib/

  getuser.S
      49  rb .req ip  label
      51  3: ldrbt rb, [r0], #0
      53  rb .req r0  label
      55  3: ldrb rb, [r0, #1]
      58  orr r2, r2, rb, lsl #8
      60  orr r2, rb, r2, lsl #8
     117  rb .req ip  label
     119  10: ldrbt rb, [r0], #0
     121  rb .req r0  label
     123  10: ldrb rb, [r0, #1]
    [all …]
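In getuser.S, rb is only a register alias (.req) used while assembling a 16-bit user-space load out of two byte loads; the two orr lines are the little-endian and big-endian combinations of the halves. A C rendering of the same composition, illustrative rather than the kernel's code:

    #include <stdint.h>

    /* Combine two byte loads into a halfword, as the orr lines above do. */
    static uint16_t load_halfword_le(const uint8_t *p)
    {
            /* little-endian: orr r2, r2, rb, lsl #8  (byte 1 is the high byte) */
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    static uint16_t load_halfword_be(const uint8_t *p)
    {
            /* big-endian: orr r2, rb, r2, lsl #8  (byte 0 is the high byte) */
            return (uint16_t)((p[0] << 8) | p[1]);
    }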
/linux/tools/include/linux/

  rbtree_augmented.h
      79  RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
      81  while (rb != stop) { \
      82  RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
      85  rb = rb_parent(&node->RBFIELD); \
     155  #define rb_color(rb) __rb_color((rb)->__rb_parent_color)  argument
     156  #define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)  argument
     157  #define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)  argument
     159  static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)  in rb_set_parent() argument
     161  rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;  in rb_set_parent()
     164  static inline void rb_set_parent_color(struct rb_node *rb,  in rb_set_parent_color() argument
    [all …]
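rb_set_parent() works because struct rb_node is at least 4-byte aligned, so the two low bits of the parent pointer are always zero and can carry the node's color; __rb_parent_color packs both into a single word. A standalone sketch of that packing trick; the constants mirror the kernel's RB_RED/RB_BLACK encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define RB_RED   0UL
    #define RB_BLACK 1UL

    struct node; /* aligned type: bits 0-1 of any valid pointer are zero */

    /* Pack parent pointer and color into one word, like __rb_parent_color. */
    static uintptr_t pack(struct node *parent, unsigned long color)
    {
            return (uintptr_t)parent | color;
    }

    static struct node *unpack_parent(uintptr_t pc)
    {
            return (struct node *)(pc & ~3UL); /* mask off the color bits */
    }

    static unsigned long unpack_color(uintptr_t pc)
    {
            return pc & 1UL; /* low bit: RB_RED (0) or RB_BLACK (1) */
    }

    int main(void)
    {
            struct node *parent = (struct node *)0x1000; /* stand-in address */
            uintptr_t pc = pack(parent, RB_BLACK);

            printf("parent=%p color=%lu\n",
                   (void *)unpack_parent(pc), unpack_color(pc));
            return 0;
    }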
/linux/tools/perf/

  builtin-lock.c
     148  while (*rb) {  in thread_stat_insert()
     149  p = container_of(*rb, struct thread_stat, rb);  in thread_stat_insert()
     150  parent = *rb;  in thread_stat_insert()
     153  rb = &(*rb)->rb_left;  in thread_stat_insert()
     155  rb = &(*rb)->rb_right;  in thread_stat_insert()
     160  rb_link_node(&new->rb, parent, rb);  in thread_stat_insert()
     289  while (*rb) {  in insert_to_result()
     290  p = container_of(*rb, struct lock_stat, rb);  in insert_to_result()
     294  rb = &(*rb)->rb_left;  in insert_to_result()
     296  rb = &(*rb)->rb_right;  in insert_to_result()
    [all …]