/linux/drivers/target/

target_core_iblock.c
      61  if (!ib_dev) {   in iblock_alloc_device()
      66  ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),   in iblock_alloc_device()
      68  if (!ib_dev->ibd_plug)   in iblock_alloc_device()
      73  return &ib_dev->dev;   in iblock_alloc_device()
      76  kfree(ib_dev);   in iblock_alloc_device()
     102  ib_dev->ibd_udev_path);   in iblock_configure_device()
     110  bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);   in iblock_configure_device()
     115  ib_dev->ibd_bd = bd;   in iblock_configure_device()
     185  kfree(ib_dev->ibd_plug);   in iblock_dev_call_rcu()
     186  kfree(ib_dev);   in iblock_dev_call_rcu()
     [all …]

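In this file ib_dev is the SCSI target iblock backend device (struct iblock_dev), not an InfiniBand device. The hits show the usual allocate-then-unwind shape: a per-CPU plug array is allocated with kcalloc() and freed again on the error path and in the RCU callback. A minimal sketch of that shape; the member type and the GFP flag are assumptions (the hit at line 66 is cut off before the flag):

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Hypothetical, trimmed-down stand-in for struct iblock_dev. */
    struct iblock_dev_sketch {
            void **ibd_plug;        /* per-CPU plug array in the real driver */
    };

    static struct iblock_dev_sketch *iblock_alloc_device_sketch(void)
    {
            struct iblock_dev_sketch *ib_dev;

            ib_dev = kzalloc(sizeof(*ib_dev), GFP_KERNEL);
            if (!ib_dev)
                    return NULL;

            /* GFP_KERNEL assumed here */
            ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
                                       GFP_KERNEL);
            if (!ib_dev->ibd_plug)
                    goto free_dev;

            return ib_dev;

    free_dev:
            kfree(ib_dev);          /* mirrors the unwind at line 76 */
            return NULL;
    }
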
/linux/drivers/infiniband/core/

roce_gid_mgmt.c
      87  if (!rdma_protocol_roce(ib_dev, port))   in roce_gid_type_mask_support()
     110  ib_cache_gid_add(ib_dev, port,   in update_gid()
     114  ib_cache_gid_del(ib_dev, port,   in update_gid()
     283  struct ib_device *ib_dev,   in update_gid_ip() argument
     368  update_gid_ip(GID_ADD, ib_dev, port, ndev,   in enum_netdev_ipv4_ips()
     424  enum_netdev_ipv4_ips(ib_dev, port, ndev);   in _add_netdev_ips()
     426  enum_netdev_ipv6_ips(ib_dev, port, ndev);   in _add_netdev_ips()
     432  _add_netdev_ips(ib_dev, port, cookie);   in add_netdev_ips()
     497  if (is_eth_port_of_netdev_filter(ib_dev, port,   in enum_all_gids_of_dev_cb()
     499  _add_netdev_ips(ib_dev, port, ndev);   in enum_all_gids_of_dev_cb()
     [all …]

cache.c
     128  event.device = ib_dev;   in dispatch_gid_change_event()
     574  attr->device = ib_dev;   in __ib_cache_gid_add()
     619  del_gid(ib_dev, port, table, ix);   in _ib_cache_gid_del()
     655  del_gid(ib_dev, port, table, ix);   in ib_cache_gid_del_all_netdev_gids()
     832  del_gid(ib_dev, port, table, i);   in cleanup_gid_table_port()
     889  rdma_for_each_port (ib_dev, p) {   in gid_table_release_one()
     890  release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);   in gid_table_release_one()
     912  gid_table_release_one(ib_dev);   in _gid_table_setup_one()
     920  rdma_for_each_port (ib_dev, p)   in gid_table_cleanup_one()
     921  cleanup_gid_table_port(ib_dev, p,   in gid_table_cleanup_one()
     [all …]

device.c
     820  pdata->ib_dev = device;   in alloc_port_data()
    1474  disable_device(ib_dev);   in __ib_unregister_device()
    1477  free_netdevs(ib_dev);   in __ib_unregister_device()
    1480  device_del(&ib_dev->dev);   in __ib_unregister_device()
    1513  get_device(&ib_dev->dev);   in ib_unregister_device()
    1515  put_device(&ib_dev->dev);   in ib_unregister_device()
    1536  get_device(&ib_dev->dev);   in ib_unregister_device_and_put()
    1537  ib_device_put(ib_dev);   in ib_unregister_device_and_put()
    2179  if (!ib_dev->port_data)   in free_netdevs()
    2223  res = ib_dev->ops.get_netdev(ib_dev, port);   in ib_device_get_netdev()
     [all …]

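The hits at 1513/1515 and 1536/1537 are the unregister helpers pinning the ib_device (through its embedded struct device, or via ib_device_put()) so the memory stays valid while disable_device(), free_netdevs() and device_del() run. A sketch of that hold-then-release shape around the public API; the wrapper function name is invented:

    #include <linux/device.h>
    #include <rdma/ib_verbs.h>

    /* Mirrors the get_device()/put_device() pairing at lines 1513/1515 above. */
    static void unregister_keeping_struct_alive_sketch(struct ib_device *ib_dev)
    {
            get_device(&ib_dev->dev);       /* pin the embedded struct device */
            ib_unregister_device(ib_dev);   /* disable_device()/free_netdevs() happen inside */
            put_device(&ib_dev->dev);       /* last put may release the ib_device */
    }
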
uverbs_std_types_device.c
     156  if (rdma_cap_opa_ah(ib_dev, port_num)) {   in copy_port_attr_to_resp()
     179  struct ib_device *ib_dev;   in UVERBS_HANDLER() local
     189  ib_dev = ucontext->device;   in UVERBS_HANDLER()
     192  if (!ib_dev->ops.query_port)   in UVERBS_HANDLER()
     245  struct ib_device *ib_dev;   in UVERBS_HANDLER() local
     252  ib_dev = ucontext->device;   in UVERBS_HANDLER()
     254  if (!ib_dev->ops.query_ucontext)   in UVERBS_HANDLER()
     317  struct ib_device *ib_dev;   in UVERBS_HANDLER() local
     346  ib_dev = ucontext->device;   in UVERBS_HANDLER()
     373  struct ib_device *ib_dev;   in UVERBS_HANDLER() local
     [all …]

uverbs_main.c
     188  struct ib_device *ib_dev;   in ib_uverbs_release_file() local
     194  ib_dev = srcu_dereference(file->device->ib_dev,   in ib_uverbs_release_file()
     196  if (ib_dev && !ib_dev->ops.disassociate_ucontext)   in ib_uverbs_release_file()
     888  struct ib_device *ib_dev;   in ib_uverbs_open() local
     900  ib_dev = srcu_dereference(dev->ib_dev,   in ib_uverbs_open()
     902  if (!ib_dev) {   in ib_uverbs_open()
    1044  struct ib_device *ib_dev;   in ibdev_show() local
    1047  ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);   in ibdev_show()
    1048  if (ib_dev)   in ibdev_show()
    1066  ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);   in abi_version_show()
     [all …]

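uverbs keeps its ib_device pointer behind SRCU so a hot-unplugged device can be disassociated while file handles are still open; every reader in the hits above dereferences dev->ib_dev under the disassociate SRCU and tolerates a NULL result. A sketch of that read-side pattern, with the two fields taken from the hits and the surrounding struct and function invented for illustration:

    #include <linux/errno.h>
    #include <linux/srcu.h>
    #include <rdma/ib_verbs.h>

    /* Illustrative stand-in for the uverbs-internal struct ib_uverbs_device. */
    struct uverbs_device_sketch {
            struct ib_device __rcu *ib_dev;
            struct srcu_struct disassociate_srcu;
    };

    static int touch_ibdev_sketch(struct uverbs_device_sketch *dev)
    {
            struct ib_device *ib_dev;
            int srcu_key, ret = -ENODEV;

            srcu_key = srcu_read_lock(&dev->disassociate_srcu);
            ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
            if (ib_dev)
                    ret = 0;        /* ib_dev is safe to use until the unlock below */
            srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

            return ret;
    }
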
uverbs_std_types_dm.c
      56  struct ib_device *ib_dev = attrs->context->device;   in UVERBS_HANDLER() local
      60  if (!ib_dev->ops.alloc_dm)   in UVERBS_HANDLER()
      73  dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs);   in UVERBS_HANDLER()
      77  dm->device = ib_dev;   in UVERBS_HANDLER()

uverbs_std_types_mr.c
      53  struct ib_device *ib_dev = pd->device;   in UVERBS_HANDLER() local
      60  if (!ib_dev->ops.advise_mr)   in UVERBS_HANDLER()
      79  return ib_dev->ops.advise_mr(pd, advice, flags, sg_list, num_sge,   in UVERBS_HANDLER()
      93  struct ib_device *ib_dev = pd->device;   in UVERBS_HANDLER() local
      98  if (!ib_dev->ops.reg_dm_mr)   in UVERBS_HANDLER()
     119  ret = ib_check_mr_access(ib_dev, attr.access_flags);   in UVERBS_HANDLER()
     193  struct ib_device *ib_dev = pd->device;   in UVERBS_HANDLER() local
     200  if (!ib_dev->ops.reg_user_mr_dmabuf)   in UVERBS_HANDLER()
     236  ret = ib_check_mr_access(ib_dev, access_flags);   in UVERBS_HANDLER()

uverbs_std_types_counters.c
      60  struct ib_device *ib_dev = attrs->context->device;   in UVERBS_HANDLER() local
      69  if (!ib_dev->ops.create_counters)   in UVERBS_HANDLER()
      72  counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);   in UVERBS_HANDLER()
      76  counters->device = ib_dev;   in UVERBS_HANDLER()
      81  ret = ib_dev->ops.create_counters(counters, attrs);   in UVERBS_HANDLER()

uverbs_std_types_cq.c
      67  struct ib_device *ib_dev = attrs->context->device;   in UVERBS_HANDLER() local
      75  if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)   in UVERBS_HANDLER()
     115  cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);   in UVERBS_HANDLER()
     121  cq->device = ib_dev;   in UVERBS_HANDLER()
     131  ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);   in UVERBS_HANDLER()

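The dm, counters and cq handlers above all follow the same shape: bail out if the driver did not provide the optional op, allocate the driver-sized object with rdma_zalloc_drv_obj(), fill in the core fields, then call through ib_dev->ops. A hedged sketch of that shape for a counters-like object; uobject handling and cleanup are simplified compared to the real handlers:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/uverbs_ioctl.h>

    static int create_counters_sketch(struct ib_device *ib_dev,
                                      struct uverbs_attr_bundle *attrs)
    {
            struct ib_counters *counters;
            int ret;

            if (!ib_dev->ops.create_counters)       /* optional driver op */
                    return -EOPNOTSUPP;

            /* allocates ops.size_ib_counters bytes, zeroed */
            counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);
            if (!counters)
                    return -ENOMEM;

            counters->device = ib_dev;              /* core bookkeeping, as at line 76 */
            atomic_set(&counters->usecnt, 0);

            ret = ib_dev->ops.create_counters(counters, attrs);
            if (ret)
                    kfree(counters);                /* real code finalizes via the uobject */

            return ret;
    }
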
core_priv.h
      90  struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
      93  void ib_enum_roce_netdev(struct ib_device *ib_dev,
     129  void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
     134  int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
     137  int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
     140  int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
     146  unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);

uverbs_cmd.c
     212  ib_dev = srcu_dereference(ufile->device->ib_dev,   in ib_alloc_ucontext()
     214  if (!ib_dev)   in ib_alloc_ucontext()
     442  pd->device = ib_dev;   in ib_uverbs_alloc_pd()
     607  &ib_dev);   in ib_uverbs_open_xrcd()
     935  mw->device = ib_dev;   in ib_uverbs_alloc_mw()
    1016  &ib_dev);   in create_cq()
    1310  &ib_dev);   in create_qp()
    1564  &ib_dev);   in ib_uverbs_open_qp()
    2893  &ib_dev);   in ib_uverbs_ex_create_wq()
    3365  &ib_dev);   in __uverbs_create_xsrq()
     [all …]

uverbs_std_types_flow_action.c
     225  static int parse_flow_action_esp(struct ib_device *ib_dev,   in parse_flow_action_esp() argument
     311  struct ib_device *ib_dev = attrs->context->device;   in UVERBS_HANDLER() local
     316  if (!ib_dev->ops.create_flow_action_esp)   in UVERBS_HANDLER()
     319  ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);   in UVERBS_HANDLER()
     324  action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,   in UVERBS_HANDLER()
     329  uverbs_flow_action_fill_action(action, uobj, ib_dev,   in UVERBS_HANDLER()

/linux/drivers/infiniband/hw/usnic/

usnic_ib_main.c
     150  ib_event.device = &us_ibdev->ib_dev;   in usnic_ib_handle_usdev_event()
     163  ib_event.device = &us_ibdev->ib_dev;   in usnic_ib_handle_usdev_event()
     179  dev_name(&us_ibdev->ib_dev.dev));   in usnic_ib_handle_usdev_event()
     189  dev_name(&us_ibdev->ib_dev.dev),   in usnic_ib_handle_usdev_event()
     204  dev_name(&us_ibdev->ib_dev.dev),   in usnic_ib_handle_usdev_event()
     216  dev_name(&us_ibdev->ib_dev.dev));   in usnic_ib_handle_usdev_event()
     260  ib_event.device = &us_ibdev->ib_dev;   in usnic_ib_handle_inet_event()
     270  ib_event.device = &us_ibdev->ib_dev;   in usnic_ib_handle_inet_event()
     277  dev_name(&us_ibdev->ib_dev.dev));   in usnic_ib_handle_inet_event()
     436  dev_name(&us_ibdev->ib_dev.dev),   in usnic_ib_device_add()
     [all …]

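Most of the usnic hits are the driver filling in a struct ib_event around its embedded ib_dev and pushing it to the core. A small sketch of that notification pattern; the driver struct, event type and port number are placeholders:

    #include <rdma/ib_verbs.h>

    /* Hypothetical driver-private wrapper; usnic embeds ib_dev the same way. */
    struct usnic_like_dev {
            struct ib_device ib_dev;
    };

    static void report_port_active_sketch(struct usnic_like_dev *us_ibdev)
    {
            struct ib_event ib_event;

            ib_event.device = &us_ibdev->ib_dev;
            ib_event.event = IB_EVENT_PORT_ACTIVE;  /* placeholder event type */
            ib_event.element.port_num = 1;          /* placeholder port */
            ib_dispatch_event(&ib_event);
    }
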
usnic_ib_sysfs.c
      53  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);   in board_id_show()
      71  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);   in config_show()
      88  dev_name(&us_ibdev->ib_dev.dev),   in config_show()
     110  dev_name(&us_ibdev->ib_dev.dev));   in config_show()
     123  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);   in iface_show()
     133  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);   in max_vf_show()
     143  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);   in qp_per_vf_show()
     157  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);   in cq_per_vf_show()
     255  kobject_get(&us_ibdev->ib_dev.dev.kobj);   in usnic_ib_sysfs_register_usdev()
     257  &us_ibdev->ib_dev.dev.kobj);   in usnic_ib_sysfs_register_usdev()
     [all …]

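The sysfs hits recover the driver structure from the struct device that sysfs hands back, using rdma_device_to_drv_device() with the name of the embedded ib_dev member. A sketch of one such show callback; the attribute name, struct and field are illustrative, not usnic's actual ones:

    #include <linux/device.h>
    #include <linux/sysfs.h>
    #include <rdma/ib_verbs.h>

    struct usnic_like_ibdev {
            struct ib_device ib_dev;
            int num_vfs;                    /* made-up field for the example */
    };

    static ssize_t max_vf_sketch_show(struct device *device,
                                      struct device_attribute *attr, char *buf)
    {
            struct usnic_like_ibdev *us_ibdev =
                    rdma_device_to_drv_device(device, struct usnic_like_ibdev, ib_dev);

            return sysfs_emit(buf, "%d\n", us_ibdev->num_vfs);
    }
    static DEVICE_ATTR_RO(max_vf_sketch);
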
/linux/drivers/infiniband/hw/hns/

hns_roce_main.c
     171  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);   in hns_roce_query_device()
     220  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);   in hns_roce_query_port()
     465  ret = ib_query_port(ib_dev, port_num, &attr);   in hns_roce_port_immutable()
     503  ib_unregister_device(&hr_dev->ib_dev);   in hns_roce_unregister_device()
     582  struct ib_device *ib_dev = NULL;   in hns_roce_register_device() local
     589  ib_dev = &hr_dev->ib_dev;   in hns_roce_register_device()
     591  ib_dev->node_type = RDMA_NODE_IB_CA;   in hns_roce_register_device()
     592  ib_dev->dev.parent = dev;   in hns_roce_register_device()
     594  ib_dev->phys_port_cnt = hr_dev->caps.num_ports;   in hns_roce_register_device()
     616  ib_set_device_ops(ib_dev, &hns_roce_dev_ops);   in hns_roce_register_device()
     [all …]

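hns_roce_register_device() in the hits sets up the embedded ib_dev (node type, parent device, port count), installs the driver's op table with ib_set_device_ops(), and then registers it. A condensed sketch of that sequence; the driver struct, name string and the three-argument ib_register_device() form (name plus DMA device, as in recent kernels) should be treated as assumptions:

    #include <linux/module.h>
    #include <rdma/ib_verbs.h>

    /* Sketch only: a stand-in for the driver-private device structure. */
    struct roce_like_dev {
            struct ib_device ib_dev;
            struct device *dev;             /* underlying DMA-capable device */
            u32 num_ports;
    };

    static const struct ib_device_ops roce_like_ops = {
            .owner = THIS_MODULE,
            /* .query_device, .query_port, ... would be filled in here */
    };

    static int register_roce_like_device(struct roce_like_dev *hr_dev)
    {
            struct ib_device *ib_dev = &hr_dev->ib_dev;

            ib_dev->node_type = RDMA_NODE_IB_CA;            /* as at line 591 */
            ib_dev->dev.parent = hr_dev->dev;
            ib_dev->phys_port_cnt = hr_dev->num_ports;

            ib_set_device_ops(ib_dev, &roce_like_ops);      /* as at line 616 */

            /* name format and dma_device argument assumed */
            return ib_register_device(ib_dev, "sketch%d", hr_dev->dev);
    }
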
hns_roce_pd.c
      48  struct ib_device *ib_dev = ibpd->device;   in hns_roce_alloc_pd() local
      49  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);   in hns_roce_alloc_pd()
      58  ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);   in hns_roce_alloc_pd()
      70  ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);   in hns_roce_alloc_pd()
      96  ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);   in hns_roce_uar_alloc()
     140  ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);   in hns_roce_xrcd_alloc()

hns_roce_qp.c
     237  ibdev_err(&hr_dev->ib_dev,   in alloc_qpn()
     482  ibdev_err(&hr_dev->ib_dev,   in set_rq_size()
     584  struct ib_device *ibdev = &hr_dev->ib_dev;   in set_user_sq_size()
     661  struct ib_device *ibdev = &hr_dev->ib_dev;   in set_kernel_sq_size()
     754  struct ib_device *ibdev = &hr_dev->ib_dev;   in alloc_qp_buf()
     834  struct ib_device *ibdev = &hr_dev->ib_dev;   in alloc_user_qp_db()
    1238  ibdev_err(&hr_dev->ib_dev,   in check_mtu_validate()
    1264  ibdev_err(&hr_dev->ib_dev,   in hns_roce_check_qp_attr()
    1273  ibdev_err(&hr_dev->ib_dev,   in hns_roce_check_qp_attr()
    1281  ibdev_err(&hr_dev->ib_dev,   in hns_roce_check_qp_attr()
     [all …]

hns_roce_mr.c
      72  struct ib_device *ibdev = &hr_dev->ib_dev;   in alloc_mr_key()
     110  struct ib_device *ibdev = &hr_dev->ib_dev;   in alloc_mr_pbl()
     144  struct ib_device *ibdev = &hr_dev->ib_dev;   in hns_roce_mr_free()
     294  struct ib_device *ib_dev = &hr_dev->ib_dev;   in hns_roce_rereg_user_mr() local
     332  ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",   in hns_roce_rereg_user_mr()
     444  struct ib_device *ibdev = &hr_dev->ib_dev;   in hns_roce_map_mr_sg()
     555  struct ib_device *ibdev = &hr_dev->ib_dev;   in hns_roce_alloc_mw()
     694  struct ib_device *ibdev = &hr_dev->ib_dev;   in mtr_alloc_bufs()
     727  struct ib_device *ibdev = &hr_dev->ib_dev;   in mtr_map_bufs()
     774  struct ib_device *ibdev = &hr_dev->ib_dev;   in hns_roce_mtr_map()
     [all …]

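Throughout the hns hits, functions grab struct ib_device *ibdev = &hr_dev->ib_dev once and then log through ibdev_err(), which prefixes messages with the RDMA device name. A two-line sketch of that logging idiom (the function wrapper is invented):

    #include <rdma/ib_verbs.h>

    static void log_pbl_failure_sketch(struct ib_device *ibdev, int ret)
    {
            /* ibdev_err() ties the message to the ib device's name in the log */
            ibdev_err(ibdev, "failed to alloc mr PBL, ret = %d.\n", ret);
    }
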
/linux/drivers/infiniband/ulp/isert/

ib_isert.c
     107  struct ib_device *ib_dev = device->ib_device;   in isert_create_qp() local
     151  struct ib_device *ib_dev = device->ib_device;   in isert_alloc_rx_descriptors() local
     168  if (ib_dma_mapping_error(ib_dev, dma_addr))   in isert_alloc_rx_descriptors()
     217  struct ib_device *ib_dev = device->ib_device;   in isert_create_device_ib_res() local
     221  ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);   in isert_create_device_ib_res()
     224  device->pd = ib_alloc_pd(ib_dev, 0);   in isert_create_device_ib_res()
     334  struct ib_device *ib_dev)   in isert_alloc_login_buf() argument
    1523  isert_unmap_tx_desc(tx_desc, ib_dev);   in isert_completion_put()
    1677  ib_dev, false);   in isert_do_control_comp()
    1702  isert_unmap_tx_desc(tx_desc, ib_dev);   in isert_login_send_done()
     [all …]

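The isert hits map receive descriptors for DMA through the ib_dma_* wrappers and check the result before posting. A sketch of that map/check/unmap cycle; the buffer, length and helper names are placeholders:

    #include <linux/dma-direction.h>
    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    static int map_rx_buffer_sketch(struct ib_device *ib_dev, void *buf,
                                    size_t len, u64 *dma_addr_out)
    {
            u64 dma_addr;

            dma_addr = ib_dma_map_single(ib_dev, buf, len, DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(ib_dev, dma_addr))
                    return -ENOMEM;         /* mirrors the check at line 168 above */

            *dma_addr_out = dma_addr;
            return 0;
    }

    static void unmap_rx_buffer_sketch(struct ib_device *ib_dev, u64 dma_addr,
                                       size_t len)
    {
            ib_dma_unmap_single(ib_dev, dma_addr, len, DMA_FROM_DEVICE);
    }
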
/linux/drivers/infiniband/sw/rxe/

rxe_sysfs.c
      62  ib_device_put(&exists->ib_dev);   in rxe_param_set_add()
      83  struct ib_device *ib_dev;   in rxe_param_set_remove() local
      97  ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);   in rxe_param_set_remove()
      98  if (!ib_dev) {   in rxe_param_set_remove()
     103  ib_unregister_device_and_put(ib_dev);   in rxe_param_set_remove()

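rxe_param_set_remove() in the hits looks the device up by name and driver id, then uses ib_unregister_device_and_put() so the lookup reference is consumed by the unregister. A sketch of that remove path; the wrapper name and error handling are illustrative:

    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    static int remove_rxe_by_name_sketch(const char *intf)
    {
            struct ib_device *ib_dev;

            ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
            if (!ib_dev)
                    return -ENODEV;         /* no rxe device with that name */

            /* consumes the reference taken by ib_device_get_by_name() */
            ib_unregister_device_and_put(ib_dev);
            return 0;
    }
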
/linux/drivers/infiniband/hw/mlx4/

mad.c
     607  ib_dma_sync_single_for_cpu(&dev->ib_dev,   in mlx4_ib_send_to_slave()
    1180  struct mlx4_ib_dev *dev = ew->ib_dev;   in handle_port_mgmt_change_event()
    1284  event.device = &dev->ib_dev;   in mlx4_ib_dispatch_event()
    1419  ib_dma_sync_single_for_cpu(&dev->ib_dev,   in mlx4_ib_send_to_wire()
    1568  ah.ibah.device = ctx->ib_dev;   in mlx4_ib_multiplex_mad()
    1658  ib_dma_map_single(ctx->ib_dev,   in mlx4_ib_alloc_pv_bufs()
    1662  if (ib_dma_mapping_error(ctx->ib_dev,   in mlx4_ib_alloc_pv_bufs()
    1965  ctx->ib_dev = &dev->ib_dev;   in alloc_pv_object()
    2015  ctx->cq = ib_create_cq(ctx->ib_dev,   in create_pv_resources()
    2024  ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);   in create_pv_resources()
     [all …]

main.c
    2240  ib_set_device_ops(&ibdev->ib_dev,   in mlx4_ib_alloc_diag_counters()
    2377  ibev.device = &ibdev->ib_dev;   in mlx4_ib_scan_netdevs()
    2480  ibdev->ib_dev.num_comp_vectors = 0;   in mlx4_ib_free_eqs()
    2703  ibdev->ib_dev.ops.uverbs_abi_ver =   in mlx4_ib_add()
    2842  &ibdev->ib_dev);   in mlx4_ib_add()
    2898  ib_dealloc_device(&ibdev->ib_dev);   in mlx4_ib_add()
    3004  ib_dealloc_device(&ibdev->ib_dev);   in mlx4_ib_remove()
    3146  ibev.device = &ibdev->ib_dev;   in handle_bonded_port_state_event()
    3188  ew->ib_dev = ibdev;   in mlx4_sched_ib_sl2vl_update_work()
    3209  ew->ib_dev = ibdev;   in mlx4_ib_event()
     [all …]

/linux/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_main.c
     208  dev->ib_dev.num_comp_vectors = 1;   in pvrdma_register_device()
     209  dev->ib_dev.dev.parent = &dev->pdev->dev;   in pvrdma_register_device()
     211  dev->ib_dev.node_type = RDMA_NODE_IB_CA;   in pvrdma_register_device()
     371  ib_event.device = &dev->ib_dev;   in pvrdma_dispatch_event()
     685  ib_device_set_netdev(&dev->ib_dev, NULL, 1);   in pvrdma_netdevice_event_handle()
     706  event, dev_name(&dev->ib_dev.dev));   in pvrdma_netdevice_event_handle()
     764  dev = ib_alloc_device(pvrdma_dev, ib_dev);   in pvrdma_pci_probe()
    1016  ib_unregister_device(&dev->ib_dev);   in pvrdma_pci_probe()
    1051  ib_dealloc_device(&dev->ib_dev);   in pvrdma_pci_probe()
    1075  ib_unregister_device(&dev->ib_dev);   in pvrdma_pci_remove()
     [all …]

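pvrdma allocates its driver structure through ib_alloc_device(), naming the embedded ib_dev member, and frees it with ib_dealloc_device() on the probe-error and remove paths (hits 764, 1051, 1075). A minimal lifecycle sketch under those assumptions; the driver struct is a stand-in and registration details are omitted:

    #include <rdma/ib_verbs.h>

    /* Stand-in for struct pvrdma_dev; ib_dev must be the first member. */
    struct pvrdma_like_dev {
            struct ib_device ib_dev;
            /* driver-private state would follow */
    };

    static struct pvrdma_like_dev *alloc_sketch(void)
    {
            /* allocates and zeroes the whole driver struct, ib_dev included */
            return ib_alloc_device(pvrdma_like_dev, ib_dev);
    }

    static void free_sketch(struct pvrdma_like_dev *dev)
    {
            /* counterpart used on probe-error and remove paths in the hits */
            ib_dealloc_device(&dev->ib_dev);
    }
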
/linux/drivers/infiniband/hw/mthca/

mthca_provider.c
    1028  memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);   in mthca_init_node_data()
    1058  container_of(device, struct mthca_dev, ib_dev);   in get_dev_fw_str()
    1150  dev->ib_dev.node_type = RDMA_NODE_IB_CA;   in mthca_register_device()
    1152  dev->ib_dev.num_comp_vectors = 1;   in mthca_register_device()
    1153  dev->ib_dev.dev.parent = &dev->pdev->dev;   in mthca_register_device()
    1157  ib_set_device_ops(&dev->ib_dev,   in mthca_register_device()
    1160  ib_set_device_ops(&dev->ib_dev,   in mthca_register_device()
    1164  ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);   in mthca_register_device()
    1167  ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);   in mthca_register_device()
    1169  ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);   in mthca_register_device()
     [all …]

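The mthca hits go the other way round: given a struct ib_device, for example in get_dev_fw_str(), the driver recovers its private structure with container_of() on the embedded ib_dev member. A sketch of that recovery; the driver struct and firmware field are invented, only the container_of() pattern and the callback shape come from the hits:

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    struct mthca_like_dev {
            struct ib_device ib_dev;
            u64 fw_ver;                     /* made-up field for the example */
    };

    static void get_dev_fw_str_sketch(struct ib_device *device, char *str)
    {
            struct mthca_like_dev *dev =
                    container_of(device, struct mthca_like_dev, ib_dev);

            /* IB_FW_VERSION_NAME_MAX bounds the buffer handed to this callback */
            snprintf(str, IB_FW_VERSION_NAME_MAX, "%llx",
                     (unsigned long long)dev->fw_ver);
    }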