/linux/drivers/gpu/drm/vmwgfx/

vmwgfx_binding.c
    740  cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =   in vmw_collect_view_ids()
    776  cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =   in vmw_collect_dirty_view_ids()
    827  cbs->bind_first_slot, cbs->bind_cmd_count);   in vmw_emit_set_sr()
    857  if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)   in vmw_emit_set_rt()
   1039  vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,   in vmw_emit_set_vb()
   1058  cbs->bind_first_slot, cbs->bind_cmd_count);   in vmw_emit_set_vb()
   1352  cbs = vzalloc(sizeof(*cbs));   in vmw_binding_state_alloc()
   1353  if (!cbs) {   in vmw_binding_state_alloc()
   1361  return cbs;   in vmw_binding_state_alloc()
   1374  vfree(cbs);   in vmw_binding_state_free()
   [all …]

vmwgfx_binding.h
    217  extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
    220  extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
    227  extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
    228  extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
    229  extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
    232  extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
    234  vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
    235  extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);

vmwgfx_context.c
     37  struct vmw_ctx_binding_state *cbs;   (member)
    154  vmw_binding_state_kill(uctx->cbs);   in vmw_hw_context_destroy()
    205  uctx->cbs = vmw_binding_state_alloc(dev_priv);   in vmw_gb_context_init()
    206  if (IS_ERR(uctx->cbs)) {   in vmw_gb_context_init()
    207  ret = PTR_ERR(uctx->cbs);   in vmw_gb_context_init()
    391  vmw_binding_state_scrub(uctx->cbs);   in vmw_gb_context_unbind()
    557  vmw_binding_state_scrub(uctx->cbs);   in vmw_dx_context_scrub_cotables()
    691  if (ctx->cbs)   in vmw_user_context_free()
    692  vmw_binding_state_free(ctx->cbs);   in vmw_user_context_free()
    833  return vmw_binding_state_list(uctx->cbs);   in vmw_context_binding_list()
   [all …]
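
Read together, the vmwgfx hits trace the lifecycle of a binding-state object: vmw_binding_state_alloc() vzalloc()s one per context, the kill/scrub helpers detach it from the hardware, and vmw_binding_state_free() vfree()s it. A minimal sketch of that pairing, following the vmw_gb_context_init() and vmw_user_context_free() matches above (the error label and the surrounding context code are assumptions, not part of the hits):

    /* Sketch only: mirrors the alloc/IS_ERR/free pattern in the matches above. */
    uctx->cbs = vmw_binding_state_alloc(dev_priv);
    if (IS_ERR(uctx->cbs)) {
            ret = PTR_ERR(uctx->cbs);
            goto out_err;                           /* hypothetical error path */
    }

    /* ... context in use; bindings are scrubbed/killed on unbind/destroy ... */

    if (ctx->cbs)                                   /* teardown, as in vmw_user_context_free() */
            vmw_binding_state_free(ctx->cbs);
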
/linux/kernel/bpf/

disasm.c
     27  if (cbs && cbs->cb_call) {   in __func_get_name()
     30  res = cbs->cb_call(cbs->private_data, insn);   in __func_get_name()
     47  if (cbs && cbs->cb_imm)   in __func_imm_name()
     48  return cbs->cb_imm(cbs->private_data, insn, full_imm);   in __func_imm_name()
    135  const bpf_insn_print_t verbose = cbs->cb_print;   in print_bpf_insn()
    255  __func_imm_name(cbs, insn, imm,   in print_bpf_insn()
    270  __func_get_name(cbs, insn,   in print_bpf_insn()
    275  __func_get_name(cbs, insn,   in print_bpf_insn()
    285  verbose(cbs->private_data,   in print_bpf_insn()
    293  verbose(cbs->private_data,   in print_bpf_insn()
   [all …]

disasm.h
     37  void print_bpf_insn(const struct bpf_insn_cbs *cbs,
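
Taken together, these hits give the shape of the kernel's BPF disassembler interface: print_bpf_insn() consumes a struct bpf_insn_cbs whose cb_print member is the output routine, while cb_call and cb_imm are optional resolvers (the `if (cbs && cbs->cb_call)` / `if (cbs && cbs->cb_imm)` guards tolerate NULL), and private_data is passed back as the first argument of every callback. A minimal userspace-style sketch, modelled on the bpftool usage listed further down; my_print_cb and dump_one are hypothetical names, and the callback signature should be checked against disasm.h of the tree you are browsing:

    #include <stdarg.h>
    #include <stdio.h>
    #include "disasm.h"                     /* struct bpf_insn_cbs, print_bpf_insn() */

    /* printf-style sink; plays the role of "verbose" inside print_bpf_insn() */
    static void my_print_cb(void *private_data, const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vprintf(fmt, args);
            va_end(args);
    }

    static void dump_one(const struct bpf_insn *insn)
    {
            const struct bpf_insn_cbs cbs = {
                    .cb_print     = my_print_cb,
                    .cb_call      = NULL,   /* optional: map call targets to names */
                    .cb_imm       = NULL,   /* optional: pretty-print 64-bit immediates */
                    .private_data = NULL,   /* first argument handed to each callback */
            };

            print_bpf_insn(&cbs, insn, true);
    }
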
/linux/drivers/net/ethernet/mscc/

ocelot_police.c
     27  u32 cir = 0, cbs = 0, pir = 0, pbs = 0;   in qos_policer_conf_set()   (local)
     48  cbs = conf->cbs;   in qos_policer_conf_set()
     49  if (cir == 0 && cbs == 0) {   in qos_policer_conf_set()
     55  cbs = DIV_ROUND_UP(cbs, 4096);   in qos_policer_conf_set()
     56  cbs = (cbs ? cbs : 1); /* No zero burst size */   in qos_policer_conf_set()
    122  if (cbs > cbs_max) {   in qos_policer_conf_set()
    124  port, cbs, cbs_max);   in qos_policer_conf_set()
    147  ANA_POL_CIR_CFG_CIR_BURST(cbs),   in qos_policer_conf_set()

ocelot_police.h
     27  u32 cbs; /* CBS in bytes/frames (ignored in SLB mode) */   (member)
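
The qos_policer_conf_set() lines spell out a unit conversion: the committed burst arrives in bytes (conf->cbs, per the ocelot_police.h comment) but ANA_POL_CIR_CFG_CIR_BURST() is programmed in 4096-byte units, rounded up and forced to at least one unit. The same lines, annotated with an illustrative 10000-byte request:

    cbs = conf->cbs;                    /* 10000 bytes requested                */
    cbs = DIV_ROUND_UP(cbs, 4096);      /* ceil(10000 / 4096) = 3 burst units   */
    cbs = (cbs ? cbs : 1);              /* a zero request still programs 1 unit */
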
/linux/net/sched/

sch_cbs.c
    253  struct tc_cbs_qopt_offload cbs = { };   in cbs_disable_offload()   (local)
    267  cbs.queue = q->queue;   in cbs_disable_offload()
    268  cbs.enable = 0;   in cbs_disable_offload()
    273  cbs.queue);   in cbs_disable_offload()
    281  struct tc_cbs_qopt_offload cbs = { };   in cbs_enable_offload()   (local)
    289  cbs.queue = q->queue;   in cbs_enable_offload()
    291  cbs.enable = 1;   in cbs_enable_offload()
    292  cbs.hicredit = opt->hicredit;   in cbs_enable_offload()
    293  cbs.locredit = opt->locredit;   in cbs_enable_offload()
    294  cbs.idleslope = opt->idleslope;   in cbs_enable_offload()
   [all …]
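
cbs_enable_offload() and cbs_disable_offload() hand the driver a struct tc_cbs_qopt_offload keyed by queue: enable plus the credit-based shaper parameters (hicredit, locredit, idleslope, sendslope) copied from the qdisc options. A sketch of the receiving side of that offload, restricted to the fields visible in these matches and in the enetc handler further down; the my_* helpers are hypothetical:

    /* Sketch of a TC_SETUP_QDISC_CBS handler, assuming the fields shown above. */
    static int my_setup_tc_cbs(struct net_device *ndev, void *type_data)
    {
            struct tc_cbs_qopt_offload *cbs = type_data;

            if (!cbs->enable)
                    return my_disable_shaper(ndev, cbs->queue);

            /* idleslope/sendslope in kbit/s, hicredit/locredit in bytes (tc-cbs convention) */
            return my_program_shaper(ndev, cbs->queue,
                                     cbs->idleslope, cbs->sendslope,
                                     cbs->hicredit, cbs->locredit);
    }
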
/linux/drivers/net/ethernet/qlogic/qed/

qed_ll2.c
    106  if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)   in qed_ll2b_complete_tx_packet()
    222  if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {   in qed_ll2b_complete_rx_packet()
    398  p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,   in qed_ll2_txq_completion()
    468  p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,   in qed_ll2_handle_slowpath()
   1327  if (!cbs || (!cbs->rx_comp_cb ||   in qed_ll2_set_cbs()
   1329  !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))   in qed_ll2_set_cbs()
   1332  p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;   in qed_ll2_set_cbs()
   1333  p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;   in qed_ll2_set_cbs()
   1334  p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;   in qed_ll2_set_cbs()
   1336  p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;   in qed_ll2_set_cbs()
   [all …]

qed_iwarp.c
   2641  struct qed_ll2_cbs cbs;   in qed_iwarp_ll2_start()   (local)
   2661  cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;   in qed_iwarp_ll2_start()
   2662  cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;   in qed_iwarp_ll2_start()
   2663  cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;   in qed_iwarp_ll2_start()
   2664  cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;   in qed_iwarp_ll2_start()
   2665  cbs.slowpath_cb = NULL;   in qed_iwarp_ll2_start()
   2666  cbs.cookie = p_hwfn;   in qed_iwarp_ll2_start()
   2679  data.cbs = &cbs;   in qed_iwarp_ll2_start()
   2728  cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;   in qed_iwarp_ll2_start()
   2729  cbs.slowpath_cb = qed_iwarp_ll2_slowpath;   in qed_iwarp_ll2_start()
   [all …]
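
qed_ll2_set_cbs() is the registration point for LL2 callbacks: the visible check rejects a set whose rx_comp_cb, tx_comp_cb, tx_release_cb or cookie is NULL, and the stored callbacks are later invoked with that cookie (see qed_ll2_txq_completion() and qed_ll2_handle_slowpath()). A caller-side sketch modelled on the qed_iwarp and qedr matches; the my_* handlers, my_ctx, and the acquire-data wrapper around `data.cbs = &cbs` are assumptions:

    struct qed_ll2_acquire_data data = { };
    struct qed_ll2_cbs cbs = { };

    cbs.rx_comp_cb    = my_rx_comp;     /* required by the qed_ll2_set_cbs() check */
    cbs.rx_release_cb = my_rx_release;
    cbs.tx_comp_cb    = my_tx_comp;     /* required */
    cbs.tx_release_cb = my_tx_release;  /* required */
    cbs.slowpath_cb   = NULL;           /* optional, see qed_ll2_handle_slowpath() */
    cbs.cookie        = my_ctx;         /* passed back as the callbacks' first argument */

    data.cbs = &cbs;
    /* ... fill the rest of data and acquire the LL2 connection ... */
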
/linux/drivers/misc/sgi-gru/

gru_instructions.h
    638  struct gru_control_block_status *cbs = (void *)cb;   in gru_get_cb_status()   (local)
    640  return cbs->istatus;   in gru_get_cb_status()
    646  struct gru_control_block_status *cbs = (void *)cb;   in gru_get_cb_message_queue_substatus()   (local)
    648  return cbs->isubstatus & CBSS_MSG_QUEUE_MASK;   in gru_get_cb_message_queue_substatus()
    654  struct gru_control_block_status *cbs = (void *)cb;   in gru_get_cb_substatus()   (local)
    656  return cbs->isubstatus;   in gru_get_cb_substatus()
    667  struct gru_control_block_status *cbs = (void *)cb;   in gru_check_status()   (local)
    670  ret = cbs->istatus;   in gru_check_status()
/linux/tools/bpf/bpftool/

xlated_dumper.c
    215  const struct bpf_insn_cbs cbs = {   in dump_xlated_json()   (local)
    264  print_bpf_insn(&cbs, insn + i, true);   in dump_xlated_json()
    300  const struct bpf_insn_cbs cbs = {   in dump_xlated_plain()   (local)
    346  print_bpf_insn(&cbs, insn + i, true);   in dump_xlated_plain()
    363  const struct bpf_insn_cbs cbs = {   in dump_xlated_for_graph()   (local)
    375  print_bpf_insn(&cbs, cur, true);   in dump_xlated_for_graph()
/linux/drivers/gpu/drm/nouveau/dispnv04/

arb.c
     58  int found, mclk_extra, mclk_loop, cbs, m1, p1;   in nv04_calc_arb()   (local)
     69  cbs = 128;   in nv04_calc_arb()
     92  m1 = clwm + cbs - 512;   in nv04_calc_arb()
    103  fifo->burst = cbs;   in nv04_calc_arb()
/linux/drivers/net/dsa/sja1105/

sja1105_main.c
   2094  if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)   in sja1105_find_unused_cbs_shaper()
   2106  struct sja1105_cbs_entry *cbs = &priv->cbs[i];   in sja1105_delete_cbs_shaper()   (local)
   2108  if (cbs->port == port && cbs->prio == prio) {   in sja1105_delete_cbs_shaper()
   2109  memset(cbs, 0, sizeof(*cbs));   in sja1105_delete_cbs_shaper()
   2111  i, cbs, true);   in sja1105_delete_cbs_shaper()
   2132  cbs = &priv->cbs[index];   in sja1105_setup_tc_cbs()
   2133  cbs->port = port;   in sja1105_setup_tc_cbs()
   2161  if (!priv->cbs)   in sja1105_reload_cbs()
   2165  struct sja1105_cbs_entry *cbs = &priv->cbs[i];   in sja1105_reload_cbs()   (local)
   2167  if (!cbs->idle_slope && !cbs->send_slope)   in sja1105_reload_cbs()
   [all …]
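
The sja1105 driver keeps its credit-based shapers in a flat priv->cbs[] table: a slot counts as free when both idle_slope and send_slope are zero, deletion matches on (port, prio) and memset()s the entry, and sja1105_reload_cbs() skips the all-zero slots when re-programming after a reset. A condensed sketch of the free-slot scan implied by sja1105_find_unused_cbs_shaper(); the n_shapers bound and the return convention are assumptions:

    static int find_unused_cbs_shaper(struct sja1105_private *priv, int n_shapers)
    {
            int i;

            for (i = 0; i < n_shapers; i++)
                    if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
                            return i;       /* both slopes zero => slot unused */

            return -1;                      /* table full */
    }
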
/linux/drivers/net/ethernet/intel/

e100.c
    534  struct param_range cbs;   (member)
    556  struct cb *cbs;   (member)
   1047  nic->params.cbs = cbs;   in e100_get_defaults()
   1854  if (nic->cbs) {   in e100_clean_cbs()
   1868  nic->cbs = NULL;   in e100_clean_cbs()
   1873  nic->cbs;   in e100_clean_cbs()
   1887  if (!nic->cbs)   in e100_alloc_cbs()
   2564  struct param_range *cbs = &nic->params.cbs;   in e100_get_ringparam()   (local)
   2577  struct param_range *cbs = &nic->params.cbs;   in e100_set_ringparam()   (local)
   2586  cbs->count = max(ring->tx_pending, cbs->min);   in e100_set_ringparam()
   [all …]
/linux/kernel/rcu/

tree_stall.h
    807  unsigned long cbs = 0;   in show_rcu_gp_kthreads()   (local)
    861  cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));   in show_rcu_gp_kthreads()
    865  pr_info("RCU callbacks invoked since boot: %lu\n", cbs);   in show_rcu_gp_kthreads()
    935  unsigned long cbs;   in rcu_fwd_progress_check()   (local)
    954  cbs = rcu_get_n_cbs_cpu(cpu);   in rcu_fwd_progress_check()
    955  if (!cbs)   in rcu_fwd_progress_check()
    959  pr_cont(" %d: %lu", cpu, cbs);   in rcu_fwd_progress_check()
    960  if (cbs <= max_cbs)   in rcu_fwd_progress_check()
    962  max_cbs = cbs;   in rcu_fwd_progress_check()
/linux/drivers/infiniband/hw/qedr/

qedr_roce_cm.c
    266  struct qed_ll2_cbs cbs;   in qedr_ll2_start()   (local)
    270  cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;   in qedr_ll2_start()
    271  cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;   in qedr_ll2_start()
    272  cbs.rx_release_cb = qedr_ll2_release_rx_packet;   in qedr_ll2_start()
    273  cbs.tx_release_cb = qedr_ll2_complete_tx_packet;   in qedr_ll2_start()
    274  cbs.cookie = dev;   in qedr_ll2_start()
    289  data.cbs = &cbs;   in qedr_ll2_start()
/linux/drivers/video/fbdev/nvidia/

nv_hw.c
    263  cbs = 128;   in nv4CalcArbitration()
    307  cbs * 1000 * 1000 / (8 * width) /   in nv4CalcArbitration()
    341  m1 = clwm + cbs - 512;   in nv4CalcArbitration()
    420  int found, mclk_extra, mclk_loop, cbs, m1;   in nv10CalcArbitration()   (local)
    441  cbs = 512;   in nv10CalcArbitration()
    559  cbs = 512;   in nv10CalcArbitration()
    583  if (cbs <= 32) {   in nv10CalcArbitration()
    588  cbs = cbs / 2;   in nv10CalcArbitration()
    605  if (clwm < (1024 - cbs + 8))   in nv10CalcArbitration()
    606  clwm = 1024 - cbs + 8;   in nv10CalcArbitration()
   [all …]
/linux/drivers/net/ethernet/freescale/enetc/

enetc_qos.c
    182  struct tc_cbs_qopt_offload *cbs = type_data;   in enetc_setup_tc_cbs()   (local)
    189  u8 tc = cbs->queue;   in enetc_setup_tc_cbs()
    201  if (!cbs->enable) {   in enetc_setup_tc_cbs()
    219  if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||   in enetc_setup_tc_cbs()
    220  cbs->idleslope < 0 || cbs->sendslope > 0)   in enetc_setup_tc_cbs()
    225  bw = cbs->idleslope / (port_transmit_rate * 10UL);   in enetc_setup_tc_cbs()
    405  u32 cbs;   (member)
    876  fmi_config->cbs = cpu_to_le32(fmi->cbs);   in enetc_flowmeter_hw_set()
   1246  fmi->cbs = entryp->police.burst;   in enetc_psfp_parse_clsflower()
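
The enetc_setup_tc_cbs() checks encode the usual credit-based shaper relationship: idleslope must be positive, sendslope negative, and their difference must equal the port rate, with idleslope/sendslope in kbit/s and port_transmit_rate apparently in Mbit/s (hence the * 1000L); the reserved bandwidth then falls out as a percentage. A worked example under those visible rules, with illustrative numbers only:

    /* hypothetical 1000 Mbit/s port, 20 Mbit/s reserved for this traffic class */
    idleslope = 20000;                          /* kbit/s */
    sendslope = idleslope - 1000 * 1000L;       /* -980000 kbit/s, so
                                                   idleslope - sendslope == rate * 1000 */
    bw        = idleslope / (1000 * 10UL);      /* 20000 / 10000 = 2 (% of line rate) */
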
/linux/drivers/net/ethernet/amd/xgbe/

xgbe-dcb.c
    132  ets->cbs = pdata->ets->cbs;   in xgbe_dcb_ieee_getets()
/linux/drivers/video/fbdev/riva/

riva_hw.c
    660  int found, mclk_extra, mclk_loop, cbs, m1, p1;   in nv4CalcArbitration()   (local)
    677  cbs = 128;   in nv4CalcArbitration()
    718  video_fill_us = cbs*1000*1000 / 16 / nvclk_freq ;   in nv4CalcArbitration()
    751  m1 = clwm + cbs - 512;   in nv4CalcArbitration()
    842  int found, mclk_extra, mclk_loop, cbs, m1;   in nv10CalcArbitration()   (local)
    863  cbs = 512;   in nv10CalcArbitration()
    986  cbs = 512;   in nv10CalcArbitration()
   1001  m1 = clwm + cbs - 1024; /* Amount of overfill */   in nv10CalcArbitration()
   1012  if(cbs <= 32) {   in nv10CalcArbitration()
   1015  cbs = cbs/2; /* reduce the burst size */   in nv10CalcArbitration()
   [all …]
/linux/Documentation/networking/device_drivers/ethernet/ti/

cpsw.rst
    232  $ tc qdisc add dev eth0 parent 100:1 cbs locredit -1438 \
    240  $ tc qdisc add dev eth0 parent 100:2 cbs locredit -1468 \
    454  $ tc qdisc add dev eth0 parent 100:1 cbs locredit -1470 \
    461  $ tc qdisc add dev eth0 parent 100:2 cbs locredit -1470 \
    514  $ tc qdisc add dev eth1 parent 100:3 cbs locredit -1035 \
    522  $ tc qdisc add dev eth1 parent 100:4 cbs locredit -1335 \
/linux/drivers/dma/ppc4xx/

xor.h
     73  u32 cbs; /* status */   (member)
/linux/drivers/net/ethernet/intel/i40e/

i40e_dcb_nl.c
     47  ets->cbs = dcbxcfg->etscfg.cbs;   in i40e_dcbnl_ieee_getets()
    124  pf->tmp_cfg.etscfg.cbs = ets->cbs;   in i40e_dcbnl_ieee_setets()
/linux/drivers/net/ethernet/intel/fm10k/

fm10k_dcbnl.c
     17  ets->cbs = 0;   in fm10k_dcbnl_ieee_getets()