Lines matching refs:enic — each hit is shown as the source file line number, the matching fragment, and the enclosing function; "argument" marks a line where enic is a parameter declaration, "local" a line where it is declared as a local variable.
114 static void enic_init_affinity_hint(struct enic *enic) in enic_init_affinity_hint() argument
116 int numa_node = dev_to_node(&enic->pdev->dev); in enic_init_affinity_hint()
119 for (i = 0; i < enic->intr_count; i++) { in enic_init_affinity_hint()
120 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || in enic_init_affinity_hint()
121 (cpumask_available(enic->msix[i].affinity_mask) && in enic_init_affinity_hint()
122 !cpumask_empty(enic->msix[i].affinity_mask))) in enic_init_affinity_hint()
124 if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, in enic_init_affinity_hint()
127 enic->msix[i].affinity_mask); in enic_init_affinity_hint()
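Read together, the hits for enic_init_affinity_hint() cover nearly the whole function. A sketch with the elided pieces filled in — the continue path, the GFP_KERNEL flag, and the cpumask_local_spread() call are inferred from context, not verbatim source:

static void enic_init_affinity_hint(struct enic *enic)
{
	int numa_node = dev_to_node(&enic->pdev->dev);
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		/* skip err/notify vectors and masks that are already populated */
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
		    (cpumask_available(enic->msix[i].affinity_mask) &&
		     !cpumask_empty(enic->msix[i].affinity_mask)))
			continue;
		/* assumed: pick one CPU per vector, local to the NIC's NUMA node */
		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
				       GFP_KERNEL))
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					enic->msix[i].affinity_mask);
	}
}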
131 static void enic_free_affinity_hint(struct enic *enic) in enic_free_affinity_hint() argument
135 for (i = 0; i < enic->intr_count; i++) { in enic_free_affinity_hint()
136 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i)) in enic_free_affinity_hint()
138 free_cpumask_var(enic->msix[i].affinity_mask); in enic_free_affinity_hint()
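The free path is fully visible except for the continue and the loop-counter declaration; a minimal reconstruction:

static void enic_free_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		/* err/notify vectors never had a mask allocated */
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
			continue;
		free_cpumask_var(enic->msix[i].affinity_mask);
	}
}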
142 static void enic_set_affinity_hint(struct enic *enic) in enic_set_affinity_hint() argument
147 for (i = 0; i < enic->intr_count; i++) { in enic_set_affinity_hint()
148 if (enic_is_err_intr(enic, i) || in enic_set_affinity_hint()
149 enic_is_notify_intr(enic, i) || in enic_set_affinity_hint()
150 !cpumask_available(enic->msix[i].affinity_mask) || in enic_set_affinity_hint()
151 cpumask_empty(enic->msix[i].affinity_mask)) in enic_set_affinity_hint()
153 err = irq_set_affinity_hint(enic->msix_entry[i].vector, in enic_set_affinity_hint()
154 enic->msix[i].affinity_mask); in enic_set_affinity_hint()
156 netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n", in enic_set_affinity_hint()
160 for (i = 0; i < enic->wq_count; i++) { in enic_set_affinity_hint()
161 int wq_intr = enic_msix_wq_intr(enic, i); in enic_set_affinity_hint()
163 if (cpumask_available(enic->msix[wq_intr].affinity_mask) && in enic_set_affinity_hint()
164 !cpumask_empty(enic->msix[wq_intr].affinity_mask)) in enic_set_affinity_hint()
165 netif_set_xps_queue(enic->netdev, in enic_set_affinity_hint()
166 enic->msix[wq_intr].affinity_mask, in enic_set_affinity_hint()
171 static void enic_unset_affinity_hint(struct enic *enic) in enic_unset_affinity_hint() argument
175 for (i = 0; i < enic->intr_count; i++) in enic_unset_affinity_hint()
176 irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); in enic_unset_affinity_hint()
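Clearing the hints is one call per vector; passing NULL removes the previously published mask:

static void enic_unset_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++)
		irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
}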
183 struct enic *enic = netdev_priv(netdev); in enic_udp_tunnel_set_port() local
186 spin_lock_bh(&enic->devcmd_lock); in enic_udp_tunnel_set_port()
188 err = vnic_dev_overlay_offload_cfg(enic->vdev, in enic_udp_tunnel_set_port()
194 err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, in enic_udp_tunnel_set_port()
195 enic->vxlan.patch_level); in enic_udp_tunnel_set_port()
199 enic->vxlan.vxlan_udp_port_number = ntohs(ti->port); in enic_udp_tunnel_set_port()
201 spin_unlock_bh(&enic->devcmd_lock); in enic_udp_tunnel_set_port()
210 struct enic *enic = netdev_priv(netdev); in enic_udp_tunnel_unset_port() local
213 spin_lock_bh(&enic->devcmd_lock); in enic_udp_tunnel_unset_port()
215 err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, in enic_udp_tunnel_unset_port()
220 enic->vxlan.vxlan_udp_port_number = 0; in enic_udp_tunnel_unset_port()
223 spin_unlock_bh(&enic->devcmd_lock); in enic_udp_tunnel_unset_port()
248 struct enic *enic = netdev_priv(dev); in enic_features_check() local
260 if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) in enic_features_check()
273 if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6)) in enic_features_check()
291 if (port != enic->vxlan.vxlan_udp_port_number) in enic_features_check()
300 int enic_is_dynamic(struct enic *enic) in enic_is_dynamic() argument
302 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; in enic_is_dynamic()
305 int enic_sriov_enabled(struct enic *enic) in enic_sriov_enabled() argument
307 return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; in enic_sriov_enabled()
310 static int enic_is_sriov_vf(struct enic *enic) in enic_is_sriov_vf() argument
312 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; in enic_is_sriov_vf()
315 int enic_is_valid_vf(struct enic *enic, int vf) in enic_is_valid_vf() argument
318 return vf >= 0 && vf < enic->num_vfs; in enic_is_valid_vf()
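These four predicates have one-line bodies, all captured by the hits above; only the CONFIG_PCI_IOV guard around enic_is_valid_vf() is an assumption:

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV	/* assumed guard; VFs only exist with PCI IOV */
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}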
326 struct enic *enic = vnic_dev_priv(wq->vdev); in enic_free_wq_buf() local
329 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_free_wq_buf()
332 dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_free_wq_buf()
348 struct enic *enic = vnic_dev_priv(vdev); in enic_wq_service() local
350 spin_lock(&enic->wq_lock[q_number]); in enic_wq_service()
352 vnic_wq_service(&enic->wq[q_number], cq_desc, in enic_wq_service()
356 if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && in enic_wq_service()
357 vnic_wq_desc_avail(&enic->wq[q_number]) >= in enic_wq_service()
359 netif_wake_subqueue(enic->netdev, q_number); in enic_wq_service()
361 spin_unlock(&enic->wq_lock[q_number]); in enic_wq_service()
366 static bool enic_log_q_error(struct enic *enic) in enic_log_q_error() argument
372 for (i = 0; i < enic->wq_count; i++) { in enic_log_q_error()
373 error_status = vnic_wq_error_status(&enic->wq[i]); in enic_log_q_error()
376 netdev_err(enic->netdev, "WQ[%d] error_status %d\n", in enic_log_q_error()
380 for (i = 0; i < enic->rq_count; i++) { in enic_log_q_error()
381 error_status = vnic_rq_error_status(&enic->rq[i]); in enic_log_q_error()
384 netdev_err(enic->netdev, "RQ[%d] error_status %d\n", in enic_log_q_error()
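A plausible reconstruction of enic_log_q_error(); the bool accumulator and the if (error_status) guards are inferred from the bool return type and the conditional netdev_err() calls:

static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				   i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				   i, error_status);
	}

	return err;
}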
391 static void enic_msglvl_check(struct enic *enic) in enic_msglvl_check() argument
393 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); in enic_msglvl_check()
395 if (msg_enable != enic->msg_enable) { in enic_msglvl_check()
396 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", in enic_msglvl_check()
397 enic->msg_enable, msg_enable); in enic_msglvl_check()
398 enic->msg_enable = msg_enable; in enic_msglvl_check()
402 static void enic_mtu_check(struct enic *enic) in enic_mtu_check() argument
404 u32 mtu = vnic_dev_mtu(enic->vdev); in enic_mtu_check()
405 struct net_device *netdev = enic->netdev; in enic_mtu_check()
407 if (mtu && mtu != enic->port_mtu) { in enic_mtu_check()
408 enic->port_mtu = mtu; in enic_mtu_check()
409 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_mtu_check()
413 schedule_work(&enic->change_mtu_work); in enic_mtu_check()
424 static void enic_link_check(struct enic *enic) in enic_link_check() argument
426 int link_status = vnic_dev_link_status(enic->vdev); in enic_link_check()
427 int carrier_ok = netif_carrier_ok(enic->netdev); in enic_link_check()
430 netdev_info(enic->netdev, "Link UP\n"); in enic_link_check()
431 netif_carrier_on(enic->netdev); in enic_link_check()
433 netdev_info(enic->netdev, "Link DOWN\n"); in enic_link_check()
434 netif_carrier_off(enic->netdev); in enic_link_check()
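The two carrier branches imply the usual edge-triggered comparison; the if/else conditions below are inferred, the calls are verbatim from the hits:

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}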
438 static void enic_notify_check(struct enic *enic) in enic_notify_check() argument
440 enic_msglvl_check(enic); in enic_notify_check()
441 enic_mtu_check(enic); in enic_notify_check()
442 enic_link_check(enic); in enic_notify_check()
450 struct enic *enic = netdev_priv(netdev); in enic_isr_legacy() local
456 vnic_intr_mask(&enic->intr[io_intr]); in enic_isr_legacy()
458 pba = vnic_intr_legacy_pba(enic->legacy_pba); in enic_isr_legacy()
460 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
465 enic_notify_check(enic); in enic_isr_legacy()
466 vnic_intr_return_all_credits(&enic->intr[notify_intr]); in enic_isr_legacy()
470 vnic_intr_return_all_credits(&enic->intr[err_intr]); in enic_isr_legacy()
471 enic_log_q_error(enic); in enic_isr_legacy()
473 schedule_work(&enic->reset); in enic_isr_legacy()
478 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_legacy()
480 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
487 struct enic *enic = data; in enic_isr_msi() local
505 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_msi()
521 struct enic *enic = data; in enic_isr_msix_err() local
522 unsigned int intr = enic_msix_err_intr(enic); in enic_isr_msix_err()
524 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_err()
526 if (enic_log_q_error(enic)) in enic_isr_msix_err()
528 schedule_work(&enic->reset); in enic_isr_msix_err()
535 struct enic *enic = data; in enic_isr_msix_notify() local
536 unsigned int intr = enic_msix_notify_intr(enic); in enic_isr_msix_notify()
538 enic_notify_check(enic); in enic_isr_msix_notify()
539 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_notify()
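The notify ISR is short enough to reconstruct whole; only the irqreturn_t signature and the IRQ_HANDLED return are assumed:

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	/* read link/MTU/msglvl updates, then release the interrupt credits */
	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}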
544 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_cont() argument
554 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0, in enic_queue_wq_skb_cont()
557 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_cont()
567 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_vlan() argument
577 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, in enic_queue_wq_skb_vlan()
579 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_vlan()
591 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_vlan()
596 static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_csum_l4() argument
608 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, in enic_queue_wq_skb_csum_l4()
610 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_csum_l4()
623 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_csum_l4()
668 static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_tso() argument
696 dma_addr = dma_map_single(&enic->pdev->dev, in enic_queue_wq_skb_tso()
699 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
722 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, in enic_queue_wq_skb_tso()
725 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
739 static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_encap() argument
756 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, in enic_queue_wq_skb_encap()
758 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_encap()
766 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_encap()
771 static inline int enic_queue_wq_skb(struct enic *enic, in enic_queue_wq_skb() argument
784 } else if (enic->loop_enable) { in enic_queue_wq_skb()
785 vlan_tag = enic->loop_tag; in enic_queue_wq_skb()
790 err = enic_queue_wq_skb_tso(enic, wq, skb, mss, in enic_queue_wq_skb()
794 err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
797 err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
800 err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
824 struct enic *enic = netdev_priv(netdev); in enic_hard_start_xmit() local
834 txq_map = skb_get_queue_mapping(skb) % enic->wq_count; in enic_hard_start_xmit()
835 wq = &enic->wq[txq_map]; in enic_hard_start_xmit()
850 spin_lock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
857 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
861 if (enic_queue_wq_skb(enic, wq, skb)) in enic_hard_start_xmit()
871 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
880 struct enic *enic = netdev_priv(netdev); in enic_get_stats() local
884 err = enic_dev_stats_dump(enic, &stats); in enic_get_stats()
901 net_stats->rx_over_errors = enic->rq_truncated_pkts; in enic_get_stats()
902 net_stats->rx_crc_errors = enic->rq_bad_fcs; in enic_get_stats()
908 struct enic *enic = netdev_priv(netdev); in enic_mc_sync() local
910 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) { in enic_mc_sync()
919 enic_dev_add_addr(enic, mc_addr); in enic_mc_sync()
920 enic->mc_count++; in enic_mc_sync()
927 struct enic *enic = netdev_priv(netdev); in enic_mc_unsync() local
929 enic_dev_del_addr(enic, mc_addr); in enic_mc_unsync()
930 enic->mc_count--; in enic_mc_unsync()
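The multicast sync/unsync pair follows the __dev_mc_sync() callback contract; the -ENOSPC return and the elided warning in the full-table branch are assumptions:

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		/* perfect-filter table is full: warn (message elided) and refuse */
		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}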
937 struct enic *enic = netdev_priv(netdev); in enic_uc_sync() local
939 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) { in enic_uc_sync()
948 enic_dev_add_addr(enic, uc_addr); in enic_uc_sync()
949 enic->uc_count++; in enic_uc_sync()
956 struct enic *enic = netdev_priv(netdev); in enic_uc_unsync() local
958 enic_dev_del_addr(enic, uc_addr); in enic_uc_unsync()
959 enic->uc_count--; in enic_uc_unsync()
964 void enic_reset_addr_lists(struct enic *enic) in enic_reset_addr_lists() argument
966 struct net_device *netdev = enic->netdev; in enic_reset_addr_lists()
971 enic->mc_count = 0; in enic_reset_addr_lists()
972 enic->uc_count = 0; in enic_reset_addr_lists()
973 enic->flags = 0; in enic_reset_addr_lists()
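Resetting the address lists plausibly unsyncs both kernel-side lists before zeroing the counters; the __dev_uc_unsync()/__dev_mc_unsync() calls are inferred (they do not mention enic, so the search cannot show them):

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	/* assumed: drop the kernel's record of synced addresses too */
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}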
978 struct enic *enic = netdev_priv(netdev); in enic_set_mac_addr() local
980 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_set_mac_addr()
995 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address_dynamic() local
1000 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
1001 err = enic_dev_del_station_addr(enic); in enic_set_mac_address_dynamic()
1010 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
1011 err = enic_dev_add_station_addr(enic); in enic_set_mac_address_dynamic()
1023 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address() local
1026 err = enic_dev_del_station_addr(enic); in enic_set_mac_address()
1034 return enic_dev_add_station_addr(enic); in enic_set_mac_address()
1040 struct enic *enic = netdev_priv(netdev); in enic_set_rx_mode() local
1052 if (enic->flags != flags) { in enic_set_rx_mode()
1053 enic->flags = flags; in enic_set_rx_mode()
1054 enic_dev_packet_filter(enic, directed, in enic_set_rx_mode()
1068 struct enic *enic = netdev_priv(netdev); in enic_tx_timeout() local
1069 schedule_work(&enic->tx_hang_reset); in enic_tx_timeout()
1074 struct enic *enic = netdev_priv(netdev); in enic_set_vf_mac() local
1078 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_mac()
1090 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_mac()
1102 struct enic *enic = netdev_priv(netdev); in enic_set_vf_port() local
1107 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_port()
1114 memcpy(&prev_pp, pp, sizeof(*enic->pp)); in enic_set_vf_port()
1115 memset(pp, 0, sizeof(*enic->pp)); in enic_set_vf_port()
1147 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_port()
1156 err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); in enic_set_vf_port()
1191 struct enic *enic = netdev_priv(netdev); in enic_get_vf_port() local
1196 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_get_vf_port()
1203 err = enic_process_get_pp_request(enic, vf, pp->request, &response); in enic_get_vf_port()
1225 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_free_rq_buf() local
1230 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_free_rq_buf()
1238 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_alloc_buf() local
1239 struct net_device *netdev = enic->netdev; in enic_rq_alloc_buf()
1256 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len, in enic_rq_alloc_buf()
1258 if (unlikely(enic_dma_map_check(enic, dma_addr))) { in enic_rq_alloc_buf()
1281 struct enic *enic = netdev_priv(netdev); in enic_rxcopybreak() local
1284 if (len > enic->rx_copybreak) in enic_rxcopybreak()
1289 dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len, in enic_rxcopybreak()
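A sketch of the copybreak helper: packets under the threshold are copied into a fresh skb so the large DMA buffer can be recycled. Everything beyond the matched lines (the threshold test, the dma_sync, the netdev_priv() lookup) is an assumption:

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	/* bring the DMA'd bytes into CPU view, then copy them out */
	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
				DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}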
1301 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_indicate_buf() local
1302 struct net_device *netdev = enic->netdev; in enic_rq_indicate_buf()
1304 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_rq_indicate_buf()
1334 enic->rq_bad_fcs++; in enic_rq_indicate_buf()
1336 enic->rq_truncated_pkts++; in enic_rq_indicate_buf()
1339 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1354 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, in enic_rq_indicate_buf()
1377 if (enic->vxlan.vxlan_udp_port_number) { in enic_rq_indicate_buf()
1378 switch (enic->vxlan.patch_level) { in enic_rq_indicate_buf()
1415 skb_mark_napi_id(skb, &enic->napi[rq->index]); in enic_rq_indicate_buf()
1419 napi_gro_receive(&enic->napi[q_number], skb); in enic_rq_indicate_buf()
1420 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_rq_indicate_buf()
1428 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1438 struct enic *enic = vnic_dev_priv(vdev); in enic_rq_service() local
1440 vnic_rq_service(&enic->rq[q_number], cq_desc, in enic_rq_service()
1447 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_set_int_moderation() argument
1449 unsigned int intr = enic_msix_rq_intr(enic, rq->index); in enic_set_int_moderation()
1450 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_set_int_moderation()
1454 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); in enic_set_int_moderation()
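The moderation setter likely writes the coalescing timer only when the computed value changed; the tobe_rx_coal_timeval field name and the change test are assumptions (cur_rx_coal_timeval itself shows up at line 1847 below):

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;	/* assumed field name */

	if (timer != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = timer;
	}
}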
1459 static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_calc_int_moderation() argument
1461 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_calc_int_moderation()
1462 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_calc_int_moderation()
1508 struct enic *enic = netdev_priv(netdev); in enic_poll() local
1509 unsigned int cq_rq = enic_cq_rq(enic, 0); in enic_poll()
1510 unsigned int cq_wq = enic_cq_wq(enic, 0); in enic_poll()
1517 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, in enic_poll()
1521 rq_work_done = vnic_cq_service(&enic->cq[cq_rq], in enic_poll()
1532 vnic_intr_return_credits(&enic->intr[intr], in enic_poll()
1537 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); in enic_poll()
1545 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll()
1549 enic_calc_int_moderation(enic, &enic->rq[0]); in enic_poll()
1557 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll()
1558 enic_set_int_moderation(enic, &enic->rq[0]); in enic_poll()
1559 vnic_intr_unmask(&enic->intr[intr]); in enic_poll()
1566 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1568 free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap); in enic_free_rx_cpu_rmap()
1569 enic->netdev->rx_cpu_rmap = NULL; in enic_free_rx_cpu_rmap()
1572 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1576 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { in enic_set_rx_cpu_rmap()
1577 enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count); in enic_set_rx_cpu_rmap()
1578 if (unlikely(!enic->netdev->rx_cpu_rmap)) in enic_set_rx_cpu_rmap()
1580 for (i = 0; i < enic->rq_count; i++) { in enic_set_rx_cpu_rmap()
1581 res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap, in enic_set_rx_cpu_rmap()
1582 enic->msix_entry[i].vector); in enic_set_rx_cpu_rmap()
1584 enic_free_rx_cpu_rmap(enic); in enic_set_rx_cpu_rmap()
1593 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1597 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1606 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_wq() local
1607 unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; in enic_poll_msix_wq()
1608 struct vnic_wq *wq = &enic->wq[wq_index]; in enic_poll_msix_wq()
1616 cq = enic_cq_wq(enic, wq_irq); in enic_poll_msix_wq()
1617 intr = enic_msix_wq_intr(enic, wq_irq); in enic_poll_msix_wq()
1618 wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, in enic_poll_msix_wq()
1621 vnic_intr_return_credits(&enic->intr[intr], wq_work_done, in enic_poll_msix_wq()
1626 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_wq()
1636 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_rq() local
1637 unsigned int rq = (napi - &enic->napi[0]); in enic_poll_msix_rq()
1638 unsigned int cq = enic_cq_rq(enic, rq); in enic_poll_msix_rq()
1639 unsigned int intr = enic_msix_rq_intr(enic, rq); in enic_poll_msix_rq()
1648 work_done = vnic_cq_service(&enic->cq[cq], in enic_poll_msix_rq()
1657 vnic_intr_return_credits(&enic->intr[intr], in enic_poll_msix_rq()
1662 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); in enic_poll_msix_rq()
1670 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1674 enic_calc_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1682 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1683 enic_set_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1684 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_rq()
1692 struct enic *enic = from_timer(enic, t, notify_timer); in enic_notify_timer() local
1694 enic_notify_check(enic); in enic_notify_timer()
1696 mod_timer(&enic->notify_timer, in enic_notify_timer()
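The notify timer simply polls and re-arms; the interval expression (round_jiffies() plus an ENIC_NOTIFY_TIMER_PERIOD constant) is an assumption:

static void enic_notify_timer(struct timer_list *t)
{
	struct enic *enic = from_timer(enic, t, notify_timer);

	enic_notify_check(enic);

	/* re-arm for the next poll; period constant assumed */
	mod_timer(&enic->notify_timer,
		  round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}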
1700 static void enic_free_intr(struct enic *enic) in enic_free_intr() argument
1702 struct net_device *netdev = enic->netdev; in enic_free_intr()
1705 enic_free_rx_cpu_rmap(enic); in enic_free_intr()
1706 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_free_intr()
1708 free_irq(enic->pdev->irq, netdev); in enic_free_intr()
1711 free_irq(enic->pdev->irq, enic); in enic_free_intr()
1714 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_free_intr()
1715 if (enic->msix[i].requested) in enic_free_intr()
1716 free_irq(enic->msix_entry[i].vector, in enic_free_intr()
1717 enic->msix[i].devid); in enic_free_intr()
1724 static int enic_request_intr(struct enic *enic) in enic_request_intr() argument
1726 struct net_device *netdev = enic->netdev; in enic_request_intr()
1730 enic_set_rx_cpu_rmap(enic); in enic_request_intr()
1731 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_request_intr()
1735 err = request_irq(enic->pdev->irq, enic_isr_legacy, in enic_request_intr()
1741 err = request_irq(enic->pdev->irq, enic_isr_msi, in enic_request_intr()
1742 0, netdev->name, enic); in enic_request_intr()
1747 for (i = 0; i < enic->rq_count; i++) { in enic_request_intr()
1748 intr = enic_msix_rq_intr(enic, i); in enic_request_intr()
1749 snprintf(enic->msix[intr].devname, in enic_request_intr()
1750 sizeof(enic->msix[intr].devname), in enic_request_intr()
1752 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1753 enic->msix[intr].devid = &enic->napi[i]; in enic_request_intr()
1756 for (i = 0; i < enic->wq_count; i++) { in enic_request_intr()
1757 int wq = enic_cq_wq(enic, i); in enic_request_intr()
1759 intr = enic_msix_wq_intr(enic, i); in enic_request_intr()
1760 snprintf(enic->msix[intr].devname, in enic_request_intr()
1761 sizeof(enic->msix[intr].devname), in enic_request_intr()
1763 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1764 enic->msix[intr].devid = &enic->napi[wq]; in enic_request_intr()
1767 intr = enic_msix_err_intr(enic); in enic_request_intr()
1768 snprintf(enic->msix[intr].devname, in enic_request_intr()
1769 sizeof(enic->msix[intr].devname), in enic_request_intr()
1771 enic->msix[intr].isr = enic_isr_msix_err; in enic_request_intr()
1772 enic->msix[intr].devid = enic; in enic_request_intr()
1774 intr = enic_msix_notify_intr(enic); in enic_request_intr()
1775 snprintf(enic->msix[intr].devname, in enic_request_intr()
1776 sizeof(enic->msix[intr].devname), in enic_request_intr()
1778 enic->msix[intr].isr = enic_isr_msix_notify; in enic_request_intr()
1779 enic->msix[intr].devid = enic; in enic_request_intr()
1781 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_request_intr()
1782 enic->msix[i].requested = 0; in enic_request_intr()
1784 for (i = 0; i < enic->intr_count; i++) { in enic_request_intr()
1785 err = request_irq(enic->msix_entry[i].vector, in enic_request_intr()
1786 enic->msix[i].isr, 0, in enic_request_intr()
1787 enic->msix[i].devname, in enic_request_intr()
1788 enic->msix[i].devid); in enic_request_intr()
1790 enic_free_intr(enic); in enic_request_intr()
1793 enic->msix[i].requested = 1; in enic_request_intr()
1805 static void enic_synchronize_irqs(struct enic *enic) in enic_synchronize_irqs() argument
1809 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_synchronize_irqs()
1812 synchronize_irq(enic->pdev->irq); in enic_synchronize_irqs()
1815 for (i = 0; i < enic->intr_count; i++) in enic_synchronize_irqs()
1816 synchronize_irq(enic->msix_entry[i].vector); in enic_synchronize_irqs()
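The synchronize path branches on interrupt mode; the INTx/MSI case labels are inferred from the single pdev->irq hit, the MSI-X case from the vector loop:

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}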
1823 static void enic_set_rx_coal_setting(struct enic *enic) in enic_set_rx_coal_setting() argument
1827 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_set_rx_coal_setting()
1833 speed = vnic_dev_port_speed(enic->vdev); in enic_set_rx_coal_setting()
1846 for (index = 0; index < enic->rq_count; index++) in enic_set_rx_coal_setting()
1847 enic->cq[index].cur_rx_coal_timeval = in enic_set_rx_coal_setting()
1848 enic->config.intr_timer_usec; in enic_set_rx_coal_setting()
1853 static int enic_dev_notify_set(struct enic *enic) in enic_dev_notify_set() argument
1857 spin_lock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1858 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_notify_set()
1860 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1864 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1865 enic_msix_notify_intr(enic)); in enic_dev_notify_set()
1868 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); in enic_dev_notify_set()
1871 spin_unlock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1876 static void enic_notify_timer_start(struct enic *enic) in enic_notify_timer_start() argument
1878 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_notify_timer_start()
1880 mod_timer(&enic->notify_timer, jiffies); in enic_notify_timer_start()
1891 struct enic *enic = netdev_priv(netdev); in enic_open() local
1895 err = enic_request_intr(enic); in enic_open()
1900 enic_init_affinity_hint(enic); in enic_open()
1901 enic_set_affinity_hint(enic); in enic_open()
1903 err = enic_dev_notify_set(enic); in enic_open()
1910 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1912 vnic_rq_enable(&enic->rq[i]); in enic_open()
1913 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); in enic_open()
1915 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { in enic_open()
1922 for (i = 0; i < enic->wq_count; i++) in enic_open()
1923 vnic_wq_enable(&enic->wq[i]); in enic_open()
1925 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_open()
1926 enic_dev_add_station_addr(enic); in enic_open()
1932 for (i = 0; i < enic->rq_count; i++) in enic_open()
1933 napi_enable(&enic->napi[i]); in enic_open()
1935 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_open()
1936 for (i = 0; i < enic->wq_count; i++) in enic_open()
1937 napi_enable(&enic->napi[enic_cq_wq(enic, i)]); in enic_open()
1938 enic_dev_enable(enic); in enic_open()
1940 for (i = 0; i < enic->intr_count; i++) in enic_open()
1941 vnic_intr_unmask(&enic->intr[i]); in enic_open()
1943 enic_notify_timer_start(enic); in enic_open()
1944 enic_rfs_timer_start(enic); in enic_open()
1949 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1950 ret = vnic_rq_disable(&enic->rq[i]); in enic_open()
1952 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_open()
1954 enic_dev_notify_unset(enic); in enic_open()
1956 enic_unset_affinity_hint(enic); in enic_open()
1957 enic_free_intr(enic); in enic_open()
1965 struct enic *enic = netdev_priv(netdev); in enic_stop() local
1969 for (i = 0; i < enic->intr_count; i++) { in enic_stop()
1970 vnic_intr_mask(&enic->intr[i]); in enic_stop()
1971 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ in enic_stop()
1974 enic_synchronize_irqs(enic); in enic_stop()
1976 del_timer_sync(&enic->notify_timer); in enic_stop()
1977 enic_rfs_flw_tbl_free(enic); in enic_stop()
1979 enic_dev_disable(enic); in enic_stop()
1981 for (i = 0; i < enic->rq_count; i++) in enic_stop()
1982 napi_disable(&enic->napi[i]); in enic_stop()
1985 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_stop()
1986 for (i = 0; i < enic->wq_count; i++) in enic_stop()
1987 napi_disable(&enic->napi[enic_cq_wq(enic, i)]); in enic_stop()
1990 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_stop()
1991 enic_dev_del_station_addr(enic); in enic_stop()
1993 for (i = 0; i < enic->wq_count; i++) { in enic_stop()
1994 err = vnic_wq_disable(&enic->wq[i]); in enic_stop()
1998 for (i = 0; i < enic->rq_count; i++) { in enic_stop()
1999 err = vnic_rq_disable(&enic->rq[i]); in enic_stop()
2004 enic_dev_notify_unset(enic); in enic_stop()
2005 enic_unset_affinity_hint(enic); in enic_stop()
2006 enic_free_intr(enic); in enic_stop()
2008 for (i = 0; i < enic->wq_count; i++) in enic_stop()
2009 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); in enic_stop()
2010 for (i = 0; i < enic->rq_count; i++) in enic_stop()
2011 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_stop()
2012 for (i = 0; i < enic->cq_count; i++) in enic_stop()
2013 vnic_cq_clean(&enic->cq[i]); in enic_stop()
2014 for (i = 0; i < enic->intr_count; i++) in enic_stop()
2015 vnic_intr_clean(&enic->intr[i]); in enic_stop()
2045 struct enic *enic = netdev_priv(netdev); in enic_change_mtu() local
2047 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_change_mtu()
2050 if (netdev->mtu > enic->port_mtu) in enic_change_mtu()
2053 netdev->mtu, enic->port_mtu); in enic_change_mtu()
2060 struct enic *enic = container_of(work, struct enic, change_mtu_work); in enic_change_mtu_work() local
2061 struct net_device *netdev = enic->netdev; in enic_change_mtu_work()
2062 int new_mtu = vnic_dev_mtu(enic->vdev); in enic_change_mtu_work()
2074 struct enic *enic = netdev_priv(netdev); in enic_poll_controller() local
2075 struct vnic_dev *vdev = enic->vdev; in enic_poll_controller()
2080 for (i = 0; i < enic->rq_count; i++) { in enic_poll_controller()
2081 intr = enic_msix_rq_intr(enic, i); in enic_poll_controller()
2082 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
2083 &enic->napi[i]); in enic_poll_controller()
2086 for (i = 0; i < enic->wq_count; i++) { in enic_poll_controller()
2087 intr = enic_msix_wq_intr(enic, i); in enic_poll_controller()
2088 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
2089 &enic->napi[enic_cq_wq(enic, i)]); in enic_poll_controller()
2094 enic_isr_msi(enic->pdev->irq, enic); in enic_poll_controller()
2097 enic_isr_legacy(enic->pdev->irq, netdev); in enic_poll_controller()
2138 static int enic_dev_open(struct enic *enic) in enic_dev_open() argument
2143 err = enic_dev_wait(enic->vdev, vnic_dev_open, in enic_dev_open()
2146 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", in enic_dev_open()
2152 static int enic_dev_soft_reset(struct enic *enic) in enic_dev_soft_reset() argument
2156 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, in enic_dev_soft_reset()
2159 netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n", in enic_dev_soft_reset()
2165 static int enic_dev_hang_reset(struct enic *enic) in enic_dev_hang_reset() argument
2169 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, in enic_dev_hang_reset()
2172 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", in enic_dev_hang_reset()
2178 int __enic_set_rsskey(struct enic *enic) in __enic_set_rsskey() argument
2184 rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev, in __enic_set_rsskey()
2193 rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i]; in __enic_set_rsskey()
2195 spin_lock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
2196 err = enic_set_rss_key(enic, in __enic_set_rsskey()
2199 spin_unlock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
2201 dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key), in __enic_set_rsskey()
2207 static int enic_set_rsskey(struct enic *enic) in enic_set_rsskey() argument
2209 netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN); in enic_set_rsskey()
2211 return __enic_set_rsskey(enic); in enic_set_rsskey()
2214 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) in enic_set_rsscpu() argument
2221 rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev, in enic_set_rsscpu()
2228 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; in enic_set_rsscpu()
2230 spin_lock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
2231 err = enic_set_rss_cpu(enic, in enic_set_rsscpu()
2234 spin_unlock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
2236 dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu), in enic_set_rsscpu()
2242 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, in enic_set_niccfg() argument
2252 spin_lock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2253 err = enic_set_nic_cfg(enic, in enic_set_niccfg()
2258 spin_unlock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2263 static int enic_set_rss_nic_cfg(struct enic *enic) in enic_set_rss_nic_cfg() argument
2265 struct device *dev = enic_get_dev(enic); in enic_set_rss_nic_cfg()
2271 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); in enic_set_rss_nic_cfg()
2273 spin_lock_bh(&enic->devcmd_lock); in enic_set_rss_nic_cfg()
2274 res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type); in enic_set_rss_nic_cfg()
2275 spin_unlock_bh(&enic->devcmd_lock); in enic_set_rss_nic_cfg()
2286 if (!enic_set_rsskey(enic)) { in enic_set_rss_nic_cfg()
2287 if (enic_set_rsscpu(enic, rss_hash_bits)) { in enic_set_rss_nic_cfg()
2298 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, in enic_set_rss_nic_cfg()
2302 static void enic_set_api_busy(struct enic *enic, bool busy) in enic_set_api_busy() argument
2304 spin_lock(&enic->enic_api_lock); in enic_set_api_busy()
2305 enic->enic_api_busy = busy; in enic_set_api_busy()
2306 spin_unlock(&enic->enic_api_lock); in enic_set_api_busy()
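All three lines of enic_set_api_busy() are matched, so this one is essentially verbatim; the flag lets devcmd users back off while a reset is in flight:

static void enic_set_api_busy(struct enic *enic, bool busy)
{
	spin_lock(&enic->enic_api_lock);
	enic->enic_api_busy = busy;
	spin_unlock(&enic->enic_api_lock);
}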
2311 struct enic *enic = container_of(work, struct enic, reset); in enic_reset() local
2313 if (!netif_running(enic->netdev)) in enic_reset()
2319 enic_set_api_busy(enic, true); in enic_reset()
2321 enic_stop(enic->netdev); in enic_reset()
2322 enic_dev_soft_reset(enic); in enic_reset()
2323 enic_reset_addr_lists(enic); in enic_reset()
2324 enic_init_vnic_resources(enic); in enic_reset()
2325 enic_set_rss_nic_cfg(enic); in enic_reset()
2326 enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_reset()
2327 enic_open(enic->netdev); in enic_reset()
2330 enic_set_api_busy(enic, false); in enic_reset()
2332 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); in enic_reset()
2339 struct enic *enic = container_of(work, struct enic, tx_hang_reset); in enic_tx_hang_reset() local
2344 enic_set_api_busy(enic, true); in enic_tx_hang_reset()
2346 enic_dev_hang_notify(enic); in enic_tx_hang_reset()
2347 enic_stop(enic->netdev); in enic_tx_hang_reset()
2348 enic_dev_hang_reset(enic); in enic_tx_hang_reset()
2349 enic_reset_addr_lists(enic); in enic_tx_hang_reset()
2350 enic_init_vnic_resources(enic); in enic_tx_hang_reset()
2351 enic_set_rss_nic_cfg(enic); in enic_tx_hang_reset()
2352 enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_tx_hang_reset()
2353 enic_open(enic->netdev); in enic_tx_hang_reset()
2356 enic_set_api_busy(enic, false); in enic_tx_hang_reset()
2358 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); in enic_tx_hang_reset()
2363 static int enic_set_intr_mode(struct enic *enic) in enic_set_intr_mode() argument
2365 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); in enic_set_intr_mode()
2366 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); in enic_set_intr_mode()
2379 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); in enic_set_intr_mode()
2381 enic->msix_entry[i].entry = i; in enic_set_intr_mode()
2386 if (ENIC_SETTING(enic, RSS) && in enic_set_intr_mode()
2387 enic->config.intr_mode < 1 && in enic_set_intr_mode()
2388 enic->rq_count >= n && in enic_set_intr_mode()
2389 enic->wq_count >= m && in enic_set_intr_mode()
2390 enic->cq_count >= n + m && in enic_set_intr_mode()
2391 enic->intr_count >= n + m + 2) { in enic_set_intr_mode()
2393 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2396 enic->rq_count = n; in enic_set_intr_mode()
2397 enic->wq_count = m; in enic_set_intr_mode()
2398 enic->cq_count = n + m; in enic_set_intr_mode()
2399 enic->intr_count = n + m + 2; in enic_set_intr_mode()
2401 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2408 if (enic->config.intr_mode < 1 && in enic_set_intr_mode()
2409 enic->rq_count >= 1 && in enic_set_intr_mode()
2410 enic->wq_count >= m && in enic_set_intr_mode()
2411 enic->cq_count >= 1 + m && in enic_set_intr_mode()
2412 enic->intr_count >= 1 + m + 2) { in enic_set_intr_mode()
2413 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2416 enic->rq_count = 1; in enic_set_intr_mode()
2417 enic->wq_count = m; in enic_set_intr_mode()
2418 enic->cq_count = 1 + m; in enic_set_intr_mode()
2419 enic->intr_count = 1 + m + 2; in enic_set_intr_mode()
2421 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2433 if (enic->config.intr_mode < 2 && in enic_set_intr_mode()
2434 enic->rq_count >= 1 && in enic_set_intr_mode()
2435 enic->wq_count >= 1 && in enic_set_intr_mode()
2436 enic->cq_count >= 2 && in enic_set_intr_mode()
2437 enic->intr_count >= 1 && in enic_set_intr_mode()
2438 !pci_enable_msi(enic->pdev)) { in enic_set_intr_mode()
2440 enic->rq_count = 1; in enic_set_intr_mode()
2441 enic->wq_count = 1; in enic_set_intr_mode()
2442 enic->cq_count = 2; in enic_set_intr_mode()
2443 enic->intr_count = 1; in enic_set_intr_mode()
2445 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); in enic_set_intr_mode()
2458 if (enic->config.intr_mode < 3 && in enic_set_intr_mode()
2459 enic->rq_count >= 1 && in enic_set_intr_mode()
2460 enic->wq_count >= 1 && in enic_set_intr_mode()
2461 enic->cq_count >= 2 && in enic_set_intr_mode()
2462 enic->intr_count >= 3) { in enic_set_intr_mode()
2464 enic->rq_count = 1; in enic_set_intr_mode()
2465 enic->wq_count = 1; in enic_set_intr_mode()
2466 enic->cq_count = 2; in enic_set_intr_mode()
2467 enic->intr_count = 3; in enic_set_intr_mode()
2469 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); in enic_set_intr_mode()
2474 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_set_intr_mode()
2479 static void enic_clear_intr_mode(struct enic *enic) in enic_clear_intr_mode() argument
2481 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_clear_intr_mode()
2483 pci_disable_msix(enic->pdev); in enic_clear_intr_mode()
2486 pci_disable_msi(enic->pdev); in enic_clear_intr_mode()
2492 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_clear_intr_mode()
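Tearing down the interrupt mode mirrors enic_set_intr_mode(); only the case labels are inferred, the calls and the trailing reset to UNKNOWN are in the hits:

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}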
2543 static void enic_dev_deinit(struct enic *enic) in enic_dev_deinit() argument
2547 for (i = 0; i < enic->rq_count; i++) in enic_dev_deinit()
2548 __netif_napi_del(&enic->napi[i]); in enic_dev_deinit()
2550 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_dev_deinit()
2551 for (i = 0; i < enic->wq_count; i++) in enic_dev_deinit()
2552 __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]); in enic_dev_deinit()
2557 enic_free_vnic_resources(enic); in enic_dev_deinit()
2558 enic_clear_intr_mode(enic); in enic_dev_deinit()
2559 enic_free_affinity_hint(enic); in enic_dev_deinit()
2562 static void enic_kdump_kernel_config(struct enic *enic) in enic_kdump_kernel_config() argument
2565 dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); in enic_kdump_kernel_config()
2566 enic->rq_count = 1; in enic_kdump_kernel_config()
2567 enic->wq_count = 1; in enic_kdump_kernel_config()
2568 enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; in enic_kdump_kernel_config()
2569 enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; in enic_kdump_kernel_config()
2570 enic->config.mtu = min_t(u16, 1500, enic->config.mtu); in enic_kdump_kernel_config()
2574 static int enic_dev_init(struct enic *enic) in enic_dev_init() argument
2576 struct device *dev = enic_get_dev(enic); in enic_dev_init()
2577 struct net_device *netdev = enic->netdev; in enic_dev_init()
2582 err = enic_dev_intr_coal_timer_info(enic); in enic_dev_init()
2586 vnic_dev_intr_coal_timer_info_default(enic->vdev); in enic_dev_init()
2592 err = enic_get_vnic_config(enic); in enic_dev_init()
2601 enic_get_res_counts(enic); in enic_dev_init()
2605 enic_kdump_kernel_config(enic); in enic_dev_init()
2611 err = enic_set_intr_mode(enic); in enic_dev_init()
2621 err = enic_alloc_vnic_resources(enic); in enic_dev_init()
2627 enic_init_vnic_resources(enic); in enic_dev_init()
2629 err = enic_set_rss_nic_cfg(enic); in enic_dev_init()
2635 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_init()
2637 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); in enic_dev_init()
2640 for (i = 0; i < enic->rq_count; i++) { in enic_dev_init()
2641 netif_napi_add(netdev, &enic->napi[i], in enic_dev_init()
2644 for (i = 0; i < enic->wq_count; i++) in enic_dev_init()
2645 netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], in enic_dev_init()
2653 enic_free_affinity_hint(enic); in enic_dev_init()
2654 enic_clear_intr_mode(enic); in enic_dev_init()
2655 enic_free_vnic_resources(enic); in enic_dev_init()
2660 static void enic_iounmap(struct enic *enic) in enic_iounmap() argument
2664 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) in enic_iounmap()
2665 if (enic->bar[i].vaddr) in enic_iounmap()
2666 iounmap(enic->bar[i].vaddr); in enic_iounmap()
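enic_iounmap() is fully covered by its hits; only the loop-counter declaration is added:

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}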
2673 struct enic *enic; in enic_probe() local
2686 netdev = alloc_etherdev_mqs(sizeof(struct enic), in enic_probe()
2695 enic = netdev_priv(netdev); in enic_probe()
2696 enic->netdev = netdev; in enic_probe()
2697 enic->pdev = pdev; in enic_probe()
2747 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { in enic_probe()
2750 enic->bar[i].len = pci_resource_len(pdev, i); in enic_probe()
2751 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); in enic_probe()
2752 if (!enic->bar[i].vaddr) { in enic_probe()
2757 enic->bar[i].bus_addr = pci_resource_start(pdev, i); in enic_probe()
2763 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, in enic_probe()
2764 ARRAY_SIZE(enic->bar)); in enic_probe()
2765 if (!enic->vdev) { in enic_probe()
2771 err = vnic_devcmd_init(enic->vdev); in enic_probe()
2781 &enic->num_vfs); in enic_probe()
2782 if (enic->num_vfs) { in enic_probe()
2783 err = pci_enable_sriov(pdev, enic->num_vfs); in enic_probe()
2790 enic->priv_flags |= ENIC_SRIOV_ENABLED; in enic_probe()
2791 num_pps = enic->num_vfs; in enic_probe()
2797 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); in enic_probe()
2798 if (!enic->pp) { in enic_probe()
2806 err = enic_dev_open(enic); in enic_probe()
2815 spin_lock_init(&enic->devcmd_lock); in enic_probe()
2816 spin_lock_init(&enic->enic_api_lock); in enic_probe()
2822 err = enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_probe()
2844 if (!enic_is_dynamic(enic)) { in enic_probe()
2845 err = vnic_dev_init(enic->vdev, 0); in enic_probe()
2852 err = enic_dev_init(enic); in enic_probe()
2858 netif_set_real_num_tx_queues(netdev, enic->wq_count); in enic_probe()
2859 netif_set_real_num_rx_queues(netdev, enic->rq_count); in enic_probe()
2864 timer_setup(&enic->notify_timer, enic_notify_timer, 0); in enic_probe()
2866 enic_rfs_flw_tbl_init(enic); in enic_probe()
2867 enic_set_rx_coal_setting(enic); in enic_probe()
2868 INIT_WORK(&enic->reset, enic_reset); in enic_probe()
2869 INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); in enic_probe()
2870 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); in enic_probe()
2872 for (i = 0; i < enic->wq_count; i++) in enic_probe()
2873 spin_lock_init(&enic->wq_lock[i]); in enic_probe()
2878 enic->port_mtu = enic->config.mtu; in enic_probe()
2880 err = enic_set_mac_addr(netdev, enic->mac_addr); in enic_probe()
2886 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; in enic_probe()
2890 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; in enic_probe()
2892 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_probe()
2901 if (ENIC_SETTING(enic, LOOP)) { in enic_probe()
2903 enic->loop_enable = 1; in enic_probe()
2904 enic->loop_tag = enic->config.loop_tag; in enic_probe()
2905 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); in enic_probe()
2907 if (ENIC_SETTING(enic, TXCSUM)) in enic_probe()
2909 if (ENIC_SETTING(enic, TSO)) in enic_probe()
2912 if (ENIC_SETTING(enic, RSS)) in enic_probe()
2914 if (ENIC_SETTING(enic, RXCSUM)) in enic_probe()
2916 if (ENIC_SETTING(enic, VXLAN)) { in enic_probe()
2939 err = vnic_dev_get_supported_feature_ver(enic->vdev, in enic_probe()
2944 enic->vxlan.flags = (u8)a1; in enic_probe()
2950 enic->vxlan.patch_level = patch_level; in enic_probe()
2952 if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 || in enic_probe()
2953 enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) { in enic_probe()
2955 if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6) in enic_probe()
2975 netdev->mtu = enic->port_mtu; in enic_probe()
2982 enic->rx_copybreak = RX_COPYBREAK_DEFAULT; in enic_probe()
2987 enic_dev_deinit(enic); in enic_probe()
2989 vnic_dev_close(enic->vdev); in enic_probe()
2991 kfree(enic->pp); in enic_probe()
2994 if (enic_sriov_enabled(enic)) { in enic_probe()
2996 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_probe()
3000 vnic_dev_unregister(enic->vdev); in enic_probe()
3002 enic_iounmap(enic); in enic_probe()
3018 struct enic *enic = netdev_priv(netdev); in enic_remove() local
3020 cancel_work_sync(&enic->reset); in enic_remove()
3021 cancel_work_sync(&enic->change_mtu_work); in enic_remove()
3023 enic_dev_deinit(enic); in enic_remove()
3024 vnic_dev_close(enic->vdev); in enic_remove()
3026 if (enic_sriov_enabled(enic)) { in enic_remove()
3028 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_remove()
3031 kfree(enic->pp); in enic_remove()
3032 vnic_dev_unregister(enic->vdev); in enic_remove()
3033 enic_iounmap(enic); in enic_remove()