/linux/drivers/interconnect/bulk.c
    15  int __must_check of_icc_bulk_get(struct device *dev, int num_paths,    [in of_icc_bulk_get(), argument]
    20  for (i = 0; i < num_paths; i++) {                                       [in of_icc_bulk_get()]
    46  void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)          [in icc_bulk_put(), argument]
    48  while (--num_paths >= 0) {                                              [in icc_bulk_put()]
    49  icc_put(paths[num_paths].path);                                         [in icc_bulk_put()]
    50  paths[num_paths].path = NULL;                                           [in icc_bulk_put()]
    62  int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)  [in icc_bulk_set_bw(), argument]
    67  for (i = 0; i < num_paths; i++) {                                       [in icc_bulk_set_bw()]
    90  for (i = 0; i < num_paths; i++) {                                       [in icc_bulk_enable()]
   114  while (--num_paths >= 0)                                                [in icc_bulk_disable()]
   [all …]

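The bulk helpers above all take a num_paths count plus an array of struct icc_bulk_data. A minimal consumer sketch, assuming hypothetical path names and bandwidth values in kBps (none of this is taken from an in-tree driver):

/*
 * Hedged sketch: how a consumer driver might use the icc_bulk_* helpers
 * listed above. The path names ("cpu-mem", "cpu-cfg") and the bandwidth
 * values are illustrative, not taken from any real driver.
 */
#include <linux/interconnect.h>
#include <linux/kernel.h>

static struct icc_bulk_data example_paths[] = {
	{ .name = "cpu-mem", .avg_bw = 100000, .peak_bw = 200000 },	/* kBps */
	{ .name = "cpu-cfg", .avg_bw = 1000,   .peak_bw = 2000   },
};

static int example_icc_setup(struct device *dev)
{
	int num_paths = ARRAY_SIZE(example_paths);
	int ret;

	/* Look up every path by .name from the device's DT node. */
	ret = of_icc_bulk_get(dev, num_paths, example_paths);
	if (ret)
		return ret;

	/* Apply the requested bandwidths, then enable all paths. */
	ret = icc_bulk_set_bw(num_paths, example_paths);
	if (ret)
		goto err_put;

	ret = icc_bulk_enable(num_paths, example_paths);
	if (ret)
		goto err_put;

	return 0;

err_put:
	icc_bulk_put(num_paths, example_paths);
	return ret;
}

Note that of_icc_bulk_get() only looks the paths up; the .avg_bw/.peak_bw fields are requests that icc_bulk_set_bw() applies afterwards.
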
/linux/samples/landlock/sandboxer.c
    57  int i, num_paths = 0;                                   [in parse_path(), local]
    60  num_paths++;                                            [in parse_path()]
    63  num_paths++;                                            [in parse_path()]
    66  *path_list = malloc(num_paths * sizeof(**path_list));   [in parse_path()]
    67  for (i = 0; i < num_paths; i++)                         [in parse_path()]
    70  return num_paths;                                       [in parse_path()]
    82  int num_paths, i, ret = 1;                              [in populate_ruleset(), local]
    97  num_paths = parse_path(env_path_name, &path_list);      [in populate_ruleset()]
    98  if (num_paths == 1 && path_list[0][0] == '\0') {        [in populate_ruleset()]
   107  for (i = 0; i < num_paths; i++) {                       [in populate_ruleset()]

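parse_path() above counts the entries of a delimiter-separated environment variable, allocates num_paths string pointers, then splits the buffer in place. A stand-alone user-space sketch of that same pattern (the ':' delimiter and the helper name are illustrative, not the sample's actual definitions):

/*
 * Hedged sketch of the counting-then-splitting pattern visible in
 * parse_path() above. An empty input still yields one empty entry,
 * which the caller can detect, as populate_ruleset() does.
 */
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>

static int split_path_list(char *env_path, char ***path_list)
{
	int i, num_paths = 1;

	/* One entry, plus one more per delimiter. */
	for (i = 0; env_path[i]; i++) {
		if (env_path[i] == ':')
			num_paths++;
	}

	*path_list = malloc(num_paths * sizeof(**path_list));
	if (!*path_list)
		return -1;

	/* Split in place; each element points into env_path. */
	for (i = 0; i < num_paths; i++)
		(*path_list)[i] = strsep(&env_path, ":");

	return num_paths;
}
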
/linux/include/linux/interconnect.h
    41  int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
    43  void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
    44  int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
    45  int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
    46  void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);

/linux/include/linux/netdevice.h
   884  int num_paths;    [member]

/linux/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
  1265  (qcfg)->num_paths : ((qcfg)->num_paths * 2))
  2170  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2182  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2188  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2194  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2200  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2206  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2212  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2236  mem_info->num = q_cfg->num_paths;    [in bna_rx_res_req()]
  2338  rx->num_paths = rx_cfg->num_paths;   [in bna_rx_create()]
  [all …]

/linux/drivers/net/ethernet/brocade/bna/bna_types.h
   657  int num_paths;    [member]
   782  int num_paths;    [member]

/linux/drivers/net/ethernet/brocade/bna/bnad.c
  2031  rx_config->num_paths = bnad->num_rxp_per_rx;                    [in bnad_init_rx_config()]
  2174  bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);   [in bnad_destroy_rx()]
  2222  rx_config->num_paths,                                           [in bnad_setup_rx()]
  2229  rx_config->num_paths,                                           [in bnad_setup_rx()]
  2265  rx_config->num_paths);                                          [in bnad_setup_rx()]

/linux/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
   233  dpu_cstate->new_perf.bw_ctl, kms->num_paths);             [in _dpu_core_perf_crtc_update_bus()]
   237  if (!kms->num_paths)                                      [in _dpu_core_perf_crtc_update_bus()]
   241  do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/   [in _dpu_core_perf_crtc_update_bus()]
   243  for (i = 0; i < kms->num_paths; i++)                      [in _dpu_core_perf_crtc_update_bus()]

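The do_div() above both spreads the aggregated bandwidth evenly across kms->num_paths data-bus paths and converts it from bytes per second to the kBps units the interconnect API expects. A hedged sketch of that split with illustrative names; here the average and peak requests are set to the same value purely for brevity:

/*
 * Hedged sketch of the bandwidth split: divide the total (bytes per
 * second) by num_paths to share it across the paths, and by 1000 to
 * convert to kBps before handing it to icc_set_bw().
 */
#include <linux/interconnect.h>
#include <linux/math64.h>

static void example_update_bus(struct icc_path **paths, int num_paths,
			       u64 total_bw_bytes)
{
	u64 avg_bw = total_bw_bytes;
	int i;

	if (!num_paths)
		return;

	do_div(avg_bw, num_paths * 1000);	/* Bps -> kBps per path */

	for (i = 0; i < num_paths; i++)
		icc_set_bw(paths[i], (u32)avg_bw, (u32)avg_bw);
}
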
/linux/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
   308  dpu_kms->num_paths = 1;                    [in dpu_kms_parse_data_bus_icc_path()]
   312  dpu_kms->num_paths++;                      [in dpu_kms_parse_data_bus_icc_path()]
  1245  for (i = 0; i < dpu_kms->num_paths; i++)   [in dpu_runtime_suspend()]
  1263  WARN_ON(!(dpu_kms->num_paths));            [in dpu_runtime_resume()]
  1265  for (i = 0; i < dpu_kms->num_paths; i++)   [in dpu_runtime_resume()]

/linux/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
   126  u32 num_paths;    [member]

/linux/include/uapi/rdma/rdma_user_cm.h
   164  __u32 num_paths;    [member]
   185  __u32 num_paths;    [member]

/linux/Documentation/admin-guide/device-mapper/switch.rst
    71  <num_paths> <region_size> <num_optional_args> [<optional_args>...] [<dev_path> <offset>]+
    72  <num_paths>
   105  The path number in the range 0 ... (<num_paths> - 1).

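For illustration only, a complete dm table line following the syntax above might look like the one below: the standard <start> <length> prefix, two paths, a region size of 2048 sectors, no optional arguments, and made-up device names and sizes.

  0 41943040 switch 2 2048 0 /dev/sdb 0 /dev/sdc 0
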
/linux/drivers/opp/of.c
   459  int ret, i, count, num_paths;                             [in dev_pm_opp_of_find_icc_paths(), local]
   486  num_paths = count / 2;                                    [in dev_pm_opp_of_find_icc_paths()]
   487  paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);   [in dev_pm_opp_of_find_icc_paths()]
   491  for (i = 0; i < num_paths; i++) {                         [in dev_pm_opp_of_find_icc_paths()]
   505  opp_table->path_count = num_paths;                        [in dev_pm_opp_of_find_icc_paths()]

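The count / 2 above reflects that each interconnect path in the DT "interconnects" property is described by two phandle+specifier entries, one for the source endpoint and one for the destination. A hedged sketch of the same counting pattern (helper name is illustrative, ERR_PTR handling of the returned paths is omitted for brevity):

/*
 * Hedged sketch: count the "interconnects" phandle entries, halve the
 * result because every path consumes a source and a destination entry,
 * then request each path by index.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interconnect.h>
#include <linux/of.h>
#include <linux/slab.h>

static int example_count_icc_paths(struct device *dev, struct icc_path ***out)
{
	struct icc_path **paths;
	int i, count, num_paths;

	count = of_count_phandle_with_args(dev->of_node, "interconnects",
					   "#interconnect-cells");
	if (count < 2 || count % 2)
		return 0;	/* no usable paths described */

	num_paths = count / 2;	/* two endpoints per path */
	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	for (i = 0; i < num_paths; i++)
		paths[i] = of_icc_get_by_index(dev, i);

	*out = paths;
	return num_paths;
}
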
/linux/drivers/infiniband/core/ucma.c
   754  resp->num_paths = route->num_paths;                                 [in ucma_copy_ib_route()]
   755  switch (route->num_paths) {                                         [in ucma_copy_ib_route()]
   781  resp->num_paths = route->num_paths;                                 [in ucma_copy_iboe_route()]
   782  switch (route->num_paths) {                                         [in ucma_copy_iboe_route()]
   921  resp->num_paths = ctx->cm_id->route.num_paths;                      [in ucma_query_path()]
   923  i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);   [in ucma_query_path()]

/linux/drivers/infiniband/core/cma.c
  2113  rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;    [in cma_ib_new_conn_id()]
  2114  rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),  [in cma_ib_new_conn_id()]
  2120  if (rt->num_paths == 2)                                             [in cma_ib_new_conn_id()]
  2698  route->num_paths = 1;                                               [in cma_query_handler()]
  2953  id->route.num_paths = 1;                                            [in rdma_set_ib_path()]
  3086  route->num_paths = 1;                                               [in cma_resolve_iboe_route()]
  3146  route->num_paths = 0;                                               [in cma_resolve_iboe_route()]
  4128  if (route->num_paths == 2)                                          [in cma_connect_ib()]

/linux/drivers/message/fusion/mptscsih.c
  2130  int num_paths;                                             [in mptscsih_is_phys_disk(), local]
  2149  num_paths = mpt_raid_phys_disk_get_num_paths(ioc,          [in mptscsih_is_phys_disk()]
  2151  if (num_paths < 2)                                         [in mptscsih_is_phys_disk()]
  2154  (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);   [in mptscsih_is_phys_disk()]
  2163  for (j = 0; j < num_paths; j++) {                          [in mptscsih_is_phys_disk()]
  2208  int num_paths;                                             [in mptscsih_raid_id_to_num(), local]
  2227  num_paths = mpt_raid_phys_disk_get_num_paths(ioc,          [in mptscsih_raid_id_to_num()]
  2229  if (num_paths < 2)                                         [in mptscsih_raid_id_to_num()]
  2232  (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);   [in mptscsih_raid_id_to_num()]
  2241  for (j = 0; j < num_paths; j++) {                          [in mptscsih_raid_id_to_num()]

/linux/drivers/message/fusion/mptsas.c
  4173  int num_paths;                                                      [in mptsas_find_phyinfo_by_phys_disk_num(), local]
  4181  num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);   [in mptsas_find_phyinfo_by_phys_disk_num()]
  4182  if (!num_paths)                                                     [in mptsas_find_phyinfo_by_phys_disk_num()]
  4185  (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);            [in mptsas_find_phyinfo_by_phys_disk_num()]
  4189  for (i = 0; i < num_paths; i++) {                                   [in mptsas_find_phyinfo_by_phys_disk_num()]

/linux/include/rdma/rdma_cm.h
    55  int num_paths;    [member]

/linux/net/netfilter/nft_flow_offload.c
    99  for (i = 0; i < stack->num_paths; i++) {   [in nft_dev_path_info()]
   113  i = stack->num_paths;                      [in nft_dev_path_info()]

/linux/drivers/platform/x86/thinkpad_acpi.c
   705  char **paths, const int num_paths)    [in drv_acpi_handle_init(), argument]
   713  for (i = 0; i < num_paths; i++) {     [in drv_acpi_handle_init()]

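drv_acpi_handle_init() above probes a list of candidate ACPI object paths, since the same feature can live under different names on different ThinkPad models. A hedged sketch of that first-match-wins pattern with illustrative names:

/*
 * Hedged sketch: resolve the first ACPI path in the candidate list that
 * actually exists under the given parent handle.
 */
#include <linux/acpi.h>

static acpi_handle example_resolve_handle(acpi_handle parent,
					  char **paths, int num_paths)
{
	acpi_handle handle;
	int i;

	for (i = 0; i < num_paths; i++) {
		if (ACPI_SUCCESS(acpi_get_handle(parent, paths[i], &handle)))
			return handle;	/* first path that resolves wins */
	}

	return NULL;
}
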
/linux/net/core/dev.c
   736  int k = stack->num_paths++;   [in dev_fwd_path()]
   755  stack->num_paths = 0;         [in dev_fill_forward_path()]

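dev_fill_forward_path() resets stack->num_paths and lets each hop's dev_fwd_path() append one entry; consumers such as nft_dev_path_info() then walk exactly num_paths entries. A hedged sketch of the consumer side, assuming the struct net_device_path_stack layout declared in netdevice.h (the function name is illustrative):

/*
 * Hedged sketch: fill the layer-2 forwarding path for a destination MAC
 * and walk the stack.num_paths entries that were recorded, keeping the
 * device of the last hop.
 */
#include <linux/netdevice.h>

static const struct net_device *example_last_hop(const struct net_device *dev,
						 const u8 *dest_mac)
{
	struct net_device_path_stack stack;
	const struct net_device *last = NULL;
	int i;

	if (dev_fill_forward_path(dev, dest_mac, &stack) < 0)
		return NULL;

	for (i = 0; i < stack.num_paths; i++)
		last = stack.path[i].dev;	/* device at each hop */

	return last;
}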