Lines matching refs: net_dev (cross-reference listing for the Freescale/NXP DPAA Ethernet driver, dpaa_eth.c). The leading number on each entry is the source-file line; the trailing "argument"/"local" tag records how net_dev enters the named function.

225 static int dpaa_netdev_init(struct net_device *net_dev,  in dpaa_netdev_init()  argument
229 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_netdev_init()
230 struct device *dev = net_dev->dev.parent; in dpaa_netdev_init()
240 percpu_priv->net_dev = net_dev; in dpaa_netdev_init()
243 net_dev->netdev_ops = dpaa_ops; in dpaa_netdev_init()
246 net_dev->mem_start = priv->mac_dev->res->start; in dpaa_netdev_init()
247 net_dev->mem_end = priv->mac_dev->res->end; in dpaa_netdev_init()
249 net_dev->min_mtu = ETH_MIN_MTU; in dpaa_netdev_init()
250 net_dev->max_mtu = dpaa_get_max_mtu(); in dpaa_netdev_init()
252 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in dpaa_netdev_init()
255 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; in dpaa_netdev_init()
259 net_dev->features |= NETIF_F_GSO; in dpaa_netdev_init()
260 net_dev->features |= NETIF_F_RXCSUM; in dpaa_netdev_init()
262 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in dpaa_netdev_init()
264 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; in dpaa_netdev_init()
266 net_dev->features |= net_dev->hw_features; in dpaa_netdev_init()
267 net_dev->vlan_features = net_dev->features; in dpaa_netdev_init()
270 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); in dpaa_netdev_init()
271 eth_hw_addr_set(net_dev, mac_addr); in dpaa_netdev_init()
273 eth_hw_addr_random(net_dev); in dpaa_netdev_init()
275 (const enet_addr_t *)net_dev->dev_addr); in dpaa_netdev_init()
281 net_dev->dev_addr); in dpaa_netdev_init()
284 net_dev->ethtool_ops = &dpaa_ethtool_ops; in dpaa_netdev_init()
286 net_dev->needed_headroom = priv->tx_headroom; in dpaa_netdev_init()
287 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); in dpaa_netdev_init()
290 netif_carrier_off(net_dev); in dpaa_netdev_init()
292 err = register_netdev(net_dev); in dpaa_netdev_init()
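
The dpaa_netdev_init() entries above show the standard net_device bring-up: populate ops and MTU bounds, advertise offloads, set a MAC address (falling back to a random one), and register last. A minimal sketch of that pattern, assuming the usual <linux/netdevice.h> and <linux/etherdevice.h> context; the function name and mac_addr handling here are illustrative, not copied from the driver (the same caveat applies to every sketch below):

/* Hedged sketch of the init pattern above; only the net_dev field
 * usage mirrors the listing. */
static int my_netdev_init(struct net_device *net_dev,
                          const struct net_device_ops *ops,
                          const u8 *mac_addr)
{
        net_dev->netdev_ops = ops;
        net_dev->min_mtu = ETH_MIN_MTU;

        /* advertise offloads, then fold them into the active set */
        net_dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        net_dev->features |= net_dev->hw_features | NETIF_F_RXCSUM;
        net_dev->vlan_features = net_dev->features;

        if (mac_addr && is_valid_ether_addr(mac_addr))
                eth_hw_addr_set(net_dev, mac_addr);   /* firmware-provided */
        else
                eth_hw_addr_random(net_dev);          /* fallback, as above */

        netif_carrier_off(net_dev);   /* carrier comes up with the PHY */

        return register_netdev(net_dev);
}
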
301 static int dpaa_stop(struct net_device *net_dev) in dpaa_stop() argument
307 priv = netdev_priv(net_dev); in dpaa_stop()
310 netif_tx_stop_all_queues(net_dev); in dpaa_stop()
318 netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n", in dpaa_stop()
327 if (net_dev->phydev) in dpaa_stop()
328 phy_disconnect(net_dev->phydev); in dpaa_stop()
329 net_dev->phydev = NULL; in dpaa_stop()
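
The dpaa_stop() entries show the shutdown ordering: TX queues are quiesced before the MAC stops, and the PHY is disconnected last. A hedged sketch; my_stop and the elided MAC stop are assumptions:

static int my_stop(struct net_device *net_dev)
{
        netif_tx_stop_all_queues(net_dev);

        /* ... stop MAC RX/TX here; the listing logs mac_dev->stop()
         * failures but carries on with the teardown ... */

        if (net_dev->phydev)
                phy_disconnect(net_dev->phydev);
        net_dev->phydev = NULL;

        return 0;
}
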
336 static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue) in dpaa_tx_timeout() argument
341 priv = netdev_priv(net_dev); in dpaa_tx_timeout()
344 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n", in dpaa_tx_timeout()
345 jiffies_to_msecs(jiffies - dev_trans_start(net_dev))); in dpaa_tx_timeout()
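
The dpaa_tx_timeout() entries log how long the queue has been stalled. A sketch of that computation, with netdev_crit swapped in for the driver's netif_crit (which additionally checks the msg_enable bitmap):

static void my_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
        netdev_crit(net_dev, "Transmit timeout latency: %u ms on queue %u\n",
                    jiffies_to_msecs(jiffies - dev_trans_start(net_dev)),
                    txqueue);
}
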
353 static void dpaa_get_stats64(struct net_device *net_dev, in dpaa_get_stats64() argument
357 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_get_stats64()
374 static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, in dpaa_setup_tc() argument
377 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_setup_tc()
392 netdev_reset_tc(net_dev); in dpaa_setup_tc()
397 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n", in dpaa_setup_tc()
402 netdev_set_num_tc(net_dev, num_tc); in dpaa_setup_tc()
405 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, in dpaa_setup_tc()
410 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); in dpaa_setup_tc()
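
The dpaa_setup_tc() entries carve the TX queues into traffic classes: reset when num_tc is zero, otherwise map each class to a contiguous queue range and resize the real queue count. A sketch, where MY_TXQ_PER_TC and MY_MAX_TC stand in for DPAA_TC_TXQ_NUM and the driver's class limit:

static int my_setup_tc(struct net_device *net_dev, u8 num_tc)
{
        int i;

        if (!num_tc) {
                netdev_reset_tc(net_dev);
                return netif_set_real_num_tx_queues(net_dev, MY_TXQ_PER_TC);
        }

        if (num_tc > MY_MAX_TC) {
                netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
                           MY_MAX_TC);
                return -EINVAL;
        }

        netdev_set_num_tc(net_dev, num_tc);
        for (i = 0; i < num_tc; i++)
                netdev_set_tc_queue(net_dev, i, MY_TXQ_PER_TC,
                                    i * MY_TXQ_PER_TC);

        return netif_set_real_num_tx_queues(net_dev, num_tc * MY_TXQ_PER_TC);
}
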
435 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) in dpaa_set_mac_address() argument
442 priv = netdev_priv(net_dev); in dpaa_set_mac_address()
444 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN); in dpaa_set_mac_address()
446 err = eth_mac_addr(net_dev, addr); in dpaa_set_mac_address()
448 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err); in dpaa_set_mac_address()
455 (const enet_addr_t *)net_dev->dev_addr); in dpaa_set_mac_address()
457 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", in dpaa_set_mac_address()
460 eth_mac_addr(net_dev, &old_addr); in dpaa_set_mac_address()
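
The dpaa_set_mac_address() entries show a rollback pattern: save the old address, let eth_mac_addr() validate and set the new one, and restore the old address if the hardware write fails. A sketch; hw_change_addr() is a hypothetical stand-in for mac_dev->change_addr():

static int my_set_mac_address(struct net_device *net_dev, void *addr)
{
        struct sockaddr old_addr;
        int err;

        memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

        err = eth_mac_addr(net_dev, addr);   /* validates + sets dev_addr */
        if (err < 0)
                return err;

        err = hw_change_addr(net_dev, net_dev->dev_addr);
        if (err < 0)
                eth_mac_addr(net_dev, &old_addr);   /* undo on failure */

        return err;
}
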
468 static void dpaa_set_rx_mode(struct net_device *net_dev) in dpaa_set_rx_mode() argument
473 priv = netdev_priv(net_dev); in dpaa_set_rx_mode()
475 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { in dpaa_set_rx_mode()
480 netif_err(priv, drv, net_dev, in dpaa_set_rx_mode()
485 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) { in dpaa_set_rx_mode()
490 netif_err(priv, drv, net_dev, in dpaa_set_rx_mode()
495 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); in dpaa_set_rx_mode()
497 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", in dpaa_set_rx_mode()
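
The dpaa_set_rx_mode() entries only reprogram the MAC when net_dev->flags disagrees with the cached promisc/allmulti state. A sketch of that diffing; my_priv and the hw_set_* helpers (returning 0 on success) are assumptions:

static void my_set_rx_mode(struct net_device *net_dev)
{
        struct my_priv *priv = netdev_priv(net_dev);
        bool want;

        want = !!(net_dev->flags & IFF_PROMISC);
        if (want != priv->promisc && !hw_set_promisc(priv, want))
                priv->promisc = want;

        want = !!(net_dev->flags & IFF_ALLMULTI);
        if (want != priv->allmulti && !hw_set_allmulti(priv, want))
                priv->allmulti = want;

        hw_set_multi(net_dev, priv);   /* sync the multicast filter */
}
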
825 netif_tx_stop_all_queues(priv->net_dev); in dpaa_eth_cgscn()
830 netif_tx_wake_all_queues(priv->net_dev); in dpaa_eth_cgscn()
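
The two dpaa_eth_cgscn() entries gate TX on congestion notifications; 'congested' below is the QMan congestion-group state, which is assumed context:

        if (congested)
                netif_tx_stop_all_queues(priv->net_dev);
        else
                netif_tx_wake_all_queues(priv->net_dev);
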
891 fq->net_dev = priv->net_dev; in dpaa_setup_ingress()
903 fq->net_dev = priv->net_dev; in dpaa_setup_egress()
926 dev_err(priv->net_dev->dev.parent, in dpaa_fq_setup()
963 dev_warn(priv->net_dev->dev.parent, in dpaa_fq_setup()
1003 priv = netdev_priv(dpaa_fq->net_dev); in dpaa_fq_init()
1004 dev = dpaa_fq->net_dev->dev.parent; in dpaa_fq_init()
1135 err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev, in dpaa_fq_init()
1164 priv = netdev_priv(dpaa_fq->net_dev); in dpaa_fq_free_entry()
1362 static void dpaa_fd_release(const struct net_device *net_dev, in dpaa_fd_release() argument
1391 netdev_err(net_dev, "DMA mapping failed\n"); in dpaa_fd_release()
1493 netif_alert(priv, tx_err, priv->net_dev, in dpaa_enable_tx_csum()
1510 netif_alert(priv, tx_err, priv->net_dev, in dpaa_enable_tx_csum()
1536 struct net_device *net_dev = dpaa_bp->priv->net_dev; in dpaa_bp_add_8_bufs() local
1545 netdev_err(net_dev, "dev_alloc_pages() failed\n"); in dpaa_bp_add_8_bufs()
1553 netdev_err(net_dev, "DMA map failed\n"); in dpaa_bp_add_8_bufs()
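
The dpaa_bp_add_8_bufs() entries show the buffer-pool refill failure paths: page allocation, then DMA mapping, each with its own error message. A sketch of one buffer's worth of that work; the wrapper function and pool hand-off are assumptions:

static int my_bp_add_buf(struct device *dev, struct net_device *net_dev)
{
        struct page *p;
        dma_addr_t addr;

        p = dev_alloc_pages(0);   /* one page per RX buffer */
        if (!p) {
                netdev_err(net_dev, "dev_alloc_pages() failed\n");
                return -ENOMEM;
        }

        addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                __free_pages(p, 0);
                netdev_err(net_dev, "DMA map failed\n");
                return -ENOMEM;
        }

        /* ... release 'addr' to the hardware buffer pool ... */
        return 0;
}
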
1656 struct device *dev = priv->net_dev->dev.parent; in dpaa_cleanup_tx_fd()
1732 if ((priv->net_dev->features & NETIF_F_RXCSUM) && in rx_csum_offload()
1914 struct net_device *net_dev = priv->net_dev; in skb_to_contig_fd() local
1940 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", in skb_to_contig_fd()
1954 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); in skb_to_contig_fd()
1967 struct net_device *net_dev = priv->net_dev; in skb_to_sg_fd() local
1980 netdev_err(net_dev, "dev_alloc_pages() failed\n"); in skb_to_sg_fd()
1994 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", in skb_to_sg_fd()
2008 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2022 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2048 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
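
The skb_to_contig_fd()/skb_to_sg_fd() entries all follow the same map-and-check idiom that produces the "DMA mapping failed" messages above. A sketch for the linear part of an skb; the wrapper is an assumption:

static int my_map_skb_head(struct device *dev, struct net_device *net_dev,
                           struct sk_buff *skb, dma_addr_t *addr)
{
        *addr = dma_map_single(dev, skb->data, skb_headlen(skb),
                               DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, *addr))) {
                netdev_err(net_dev, "DMA mapping failed\n");
                return -EINVAL;
        }
        return 0;
}
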
2084 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); in dpaa_xmit()
2104 static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s) in dpaa_a050385_wa_skb() argument
2106 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_a050385_wa_skb()
2139 new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 + in dpaa_a050385_wa_skb()
2261 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) in dpaa_start_xmit() argument
2273 priv = netdev_priv(net_dev); in dpaa_start_xmit()
2308 if (dpaa_a050385_wa_skb(net_dev, &skb)) in dpaa_start_xmit()
2325 txq = netdev_get_tx_queue(net_dev, queue_mapping); in dpaa_start_xmit()
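
The dpaa_start_xmit() entries show the TX entry point's shape: look up the queue the stack mapped the skb to, optionally rewrite the skb for the A-050385 erratum, then build and enqueue a frame descriptor. A heavily elided sketch:

static netdev_tx_t my_start_xmit(struct sk_buff *skb,
                                 struct net_device *net_dev)
{
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(net_dev, skb_get_queue_mapping(skb));

        /* ... convert skb to a frame descriptor and ring the hardware;
         * on errata paths the skb may first be realigned, as the
         * dpaa_a050385_wa_skb() call above suggests ... */

        return NETDEV_TX_OK;
}
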
2346 static void dpaa_rx_error(struct net_device *net_dev, in dpaa_rx_error() argument
2353 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", in dpaa_rx_error()
2367 dpaa_fd_release(net_dev, fd); in dpaa_rx_error()
2370 static void dpaa_tx_error(struct net_device *net_dev, in dpaa_tx_error() argument
2379 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", in dpaa_tx_error()
2411 static void dpaa_tx_conf(struct net_device *net_dev, in dpaa_tx_conf() argument
2421 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", in dpaa_tx_conf()
2457 struct net_device *net_dev; in rx_error_dqrr() local
2461 net_dev = dpaa_fq->net_dev; in rx_error_dqrr()
2462 priv = netdev_priv(net_dev); in rx_error_dqrr()
2473 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in rx_error_dqrr()
2478 static int dpaa_xdp_xmit_frame(struct net_device *net_dev, in dpaa_xdp_xmit_frame() argument
2481 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_xdp_xmit_frame()
2533 txq = netdev_get_tx_queue(net_dev, smp_processor_id()); in dpaa_xdp_xmit_frame()
2610 if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf)) in dpaa_run_xdp()
2619 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); in dpaa_run_xdp()
2621 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
2629 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
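
The dpaa_run_xdp() entries imply the usual XDP verdict dispatch: transmit back out, redirect, or drop, tracing exceptions along the way. A sketch; xdp_act, xdpf, xdp and xdp_prog are assumed context, and my_xdp_xmit_frame stands in for dpaa_xdp_xmit_frame:

        switch (xdp_act) {
        case XDP_PASS:
                break;   /* hand the frame to the regular RX path */
        case XDP_TX:
                if (my_xdp_xmit_frame(priv->net_dev, xdpf))
                        xdp_return_frame(xdpf);   /* could not enqueue */
                break;
        case XDP_REDIRECT:
                if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog))
                        trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
                break;
        case XDP_ABORTED:
                trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
                fallthrough;
        case XDP_DROP:
        default:
                /* recycle or free the buffer */
                break;
        }
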
2654 struct net_device *net_dev; in rx_default_dqrr() local
2670 net_dev = dpaa_fq->net_dev; in rx_default_dqrr()
2671 priv = netdev_priv(net_dev); in rx_default_dqrr()
2677 trace_dpaa_rx_fd(net_dev, fq, &dq->fd); in rx_default_dqrr()
2692 dpaa_fd_release(net_dev, &dq->fd); in rx_default_dqrr()
2698 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", in rx_default_dqrr()
2702 dpaa_fd_release(net_dev, fd); in rx_default_dqrr()
2731 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && in rx_default_dqrr()
2774 skb->protocol = eth_type_trans(skb, net_dev); in rx_default_dqrr()
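
The rx_default_dqrr() entries end with the classic RX completion step: set the protocol via eth_type_trans() and, when NETIF_F_RXHASH is active and the classifier ran, attach the hardware hash before handing the skb to NAPI. A sketch; the hash and napi plumbing names are assumptions:

static void my_rx_finish(struct net_device *net_dev, struct napi_struct *napi,
                         struct sk_buff *skb, u32 hash, bool hash_valid)
{
        skb->protocol = eth_type_trans(skb, net_dev);

        if ((net_dev->features & NETIF_F_RXHASH) && hash_valid)
                skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);

        napi_gro_receive(napi, skb);
}
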
2805 struct net_device *net_dev; in conf_error_dqrr() local
2808 net_dev = ((struct dpaa_fq *)fq)->net_dev; in conf_error_dqrr()
2809 priv = netdev_priv(net_dev); in conf_error_dqrr()
2816 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in conf_error_dqrr()
2827 struct net_device *net_dev; in conf_dflt_dqrr() local
2830 net_dev = ((struct dpaa_fq *)fq)->net_dev; in conf_dflt_dqrr()
2831 priv = netdev_priv(net_dev); in conf_dflt_dqrr()
2834 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); in conf_dflt_dqrr()
2841 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in conf_dflt_dqrr()
2853 struct net_device *net_dev; in egress_ern() local
2856 net_dev = ((struct dpaa_fq *)fq)->net_dev; in egress_ern()
2857 priv = netdev_priv(net_dev); in egress_ern()
2902 static void dpaa_adjust_link(struct net_device *net_dev) in dpaa_adjust_link() argument
2907 priv = netdev_priv(net_dev); in dpaa_adjust_link()
2915 static int dpaa_phy_init(struct net_device *net_dev) in dpaa_phy_init() argument
2922 priv = netdev_priv(net_dev); in dpaa_phy_init()
2925 phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, in dpaa_phy_init()
2929 netif_err(priv, ifup, net_dev, "init_phy() failed\n"); in dpaa_phy_init()
2945 net_dev->phydev = phy_dev; in dpaa_phy_init()
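
The dpaa_adjust_link()/dpaa_phy_init() entries show the device-tree PHY attach: connect with an adjust_link callback, then publish the phydev on the netdev. A sketch; my_adjust_link's body and the iface parameter are assumptions:

static void my_adjust_link(struct net_device *net_dev)
{
        /* push net_dev->phydev state (speed/duplex/link) to the MAC */
}

static int my_phy_init(struct net_device *net_dev,
                       struct device_node *phy_node, phy_interface_t iface)
{
        struct phy_device *phy_dev;

        phy_dev = of_phy_connect(net_dev, phy_node, my_adjust_link, 0, iface);
        if (!phy_dev) {
                netdev_err(net_dev, "of_phy_connect() failed\n");
                return -ENODEV;
        }

        net_dev->phydev = phy_dev;
        return 0;
}
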
2950 static int dpaa_open(struct net_device *net_dev) in dpaa_open() argument
2956 priv = netdev_priv(net_dev); in dpaa_open()
2960 err = dpaa_phy_init(net_dev); in dpaa_open()
2972 netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); in dpaa_open()
2976 netif_tx_start_all_queues(net_dev); in dpaa_open()
2990 static int dpaa_eth_stop(struct net_device *net_dev) in dpaa_eth_stop() argument
2995 err = dpaa_stop(net_dev); in dpaa_eth_stop()
2997 priv = netdev_priv(net_dev); in dpaa_eth_stop()
3011 dev_warn(priv->net_dev->dev.parent, in xdp_validate_mtu()
3020 static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu) in dpaa_change_mtu() argument
3022 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_change_mtu()
3027 net_dev->mtu = new_mtu; in dpaa_change_mtu()
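
The dpaa_change_mtu() entries gate the new MTU on the XDP buffer layout before committing it. A sketch; my_priv and xdp_mtu_ok() are assumptions standing in for the driver's xdp_validate_mtu():

static int my_change_mtu(struct net_device *net_dev, int new_mtu)
{
        struct my_priv *priv = netdev_priv(net_dev);

        if (priv->xdp_prog && !xdp_mtu_ok(priv, new_mtu))
                return -EINVAL;

        net_dev->mtu = new_mtu;
        return 0;
}
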
3031 static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf) in dpaa_setup_xdp() argument
3033 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_setup_xdp()
3039 if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) { in dpaa_setup_xdp()
3044 up = netif_running(net_dev); in dpaa_setup_xdp()
3047 dpaa_eth_stop(net_dev); in dpaa_setup_xdp()
3054 err = dpaa_open(net_dev); in dpaa_setup_xdp()
3064 static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp) in dpaa_xdp() argument
3068 return dpaa_setup_xdp(net_dev, xdp); in dpaa_xdp()
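
The dpaa_setup_xdp()/dpaa_xdp() entries show the program-swap choreography: validate the MTU, take the interface down if it is running, exchange the program pointer, and bring it back up. A sketch; my_stop/my_open mirror dpaa_eth_stop/dpaa_open and the priv layout is an assumption:

static int my_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
{
        struct my_priv *priv = netdev_priv(net_dev);
        struct bpf_prog *old;
        bool up;

        if (bpf->prog && !xdp_mtu_ok(priv, net_dev->mtu)) {
                NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
                return -EINVAL;
        }

        up = netif_running(net_dev);
        if (up)
                my_stop(net_dev);

        old = xchg(&priv->xdp_prog, bpf->prog);
        if (old)
                bpf_prog_put(old);

        if (up)
                return my_open(net_dev);
        return 0;
}
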
3074 static int dpaa_xdp_xmit(struct net_device *net_dev, int n, in dpaa_xdp_xmit() argument
3083 if (!netif_running(net_dev)) in dpaa_xdp_xmit()
3088 if (dpaa_xdp_xmit_frame(net_dev, xdpf)) in dpaa_xdp_xmit()
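
The dpaa_xdp_xmit() entries follow the modern ndo_xdp_xmit contract: transmit as many frames as possible, return that count, and let the caller free the rest. A sketch:

static int my_xdp_xmit(struct net_device *net_dev, int n,
                       struct xdp_frame **frames, u32 flags)
{
        int i, nxmit = 0;

        if (flags & ~XDP_XMIT_FLAGS_MASK)
                return -EINVAL;

        if (!netif_running(net_dev))
                return -ENETDOWN;

        for (i = 0; i < n; i++) {
                if (my_xdp_xmit_frame(net_dev, frames[i]))
                        break;
                nxmit++;
        }

        return nxmit;   /* caller frees the frames that were not sent */
}
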
3135 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) in dpaa_ioctl() argument
3140 if (net_dev->phydev) in dpaa_ioctl()
3141 return phy_mii_ioctl(net_dev->phydev, rq, cmd); in dpaa_ioctl()
3145 return dpaa_ts_ioctl(net_dev, rq, cmd); in dpaa_ioctl()
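
The dpaa_ioctl() entries dispatch MII requests to the PHY and hardware-timestamping requests to a driver handler. A sketch; my_ts_ioctl stands in for dpaa_ts_ioctl and the exact cmd checks are assumptions:

static int my_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
        if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
                return my_ts_ioctl(net_dev, rq, cmd);

        if (net_dev->phydev)
                return phy_mii_ioctl(net_dev->phydev, rq, cmd);

        return -EINVAL;
}
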
3167 static int dpaa_napi_add(struct net_device *net_dev) in dpaa_napi_add() argument
3169 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_napi_add()
3176 netif_napi_add(net_dev, &percpu_priv->np.napi, in dpaa_napi_add()
3183 static void dpaa_napi_del(struct net_device *net_dev) in dpaa_napi_del() argument
3185 struct dpaa_priv *priv = netdev_priv(net_dev); in dpaa_napi_del()
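
The dpaa_napi_add()/dpaa_napi_del() entries register one NAPI context per possible CPU. A sketch using the three-argument netif_napi_add() of v6.1+ kernels (older kernels take an extra weight argument); my_poll and the percpu layout are assumptions:

static void my_napi_add(struct net_device *net_dev)
{
        struct my_priv *priv = netdev_priv(net_dev);
        int cpu;

        for_each_possible_cpu(cpu)
                netif_napi_add(net_dev,
                               &per_cpu_ptr(priv->percpu_priv, cpu)->napi,
                               my_poll);
}

static void my_napi_del(struct net_device *net_dev)
{
        struct my_priv *priv = netdev_priv(net_dev);
        int cpu;

        for_each_possible_cpu(cpu)
                netif_napi_del(&per_cpu_ptr(priv->percpu_priv, cpu)->napi);
}
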
3310 struct net_device *net_dev = NULL; in dpaa_eth_probe() local
3355 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); in dpaa_eth_probe()
3356 if (!net_dev) { in dpaa_eth_probe()
3362 SET_NETDEV_DEV(net_dev, dev->parent); in dpaa_eth_probe()
3363 dev_set_drvdata(dev, net_dev); in dpaa_eth_probe()
3365 priv = netdev_priv(net_dev); in dpaa_eth_probe()
3366 priv->net_dev = net_dev; in dpaa_eth_probe()
3372 netdev_err(net_dev, "dpaa_mac_dev_get() failed\n"); in dpaa_eth_probe()
3385 netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); in dpaa_eth_probe()
3396 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); in dpaa_eth_probe()
3398 netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", in dpaa_eth_probe()
3399 net_dev->mtu); in dpaa_eth_probe()
3493 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); in dpaa_eth_probe()
3496 err = dpaa_napi_add(net_dev); in dpaa_eth_probe()
3500 err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); in dpaa_eth_probe()
3504 dpaa_eth_sysfs_init(&net_dev->dev); in dpaa_eth_probe()
3506 netif_info(priv, probe, net_dev, "Probed interface %s\n", in dpaa_eth_probe()
3507 net_dev->name); in dpaa_eth_probe()
3512 dpaa_napi_del(net_dev); in dpaa_eth_probe()
3524 free_netdev(net_dev); in dpaa_eth_probe()
3531 struct net_device *net_dev; in dpaa_remove() local
3537 net_dev = dev_get_drvdata(dev); in dpaa_remove()
3539 priv = netdev_priv(net_dev); in dpaa_remove()
3544 unregister_netdev(net_dev); in dpaa_remove()
3553 dpaa_napi_del(net_dev); in dpaa_remove()
3557 free_netdev(net_dev); in dpaa_remove()
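
The dpaa_eth_probe()/dpaa_remove() entries close the listing with the alloc/teardown symmetry: allocate the multiqueue netdev, wire up drvdata and priv, register last; on remove, unregister first and free last. A sketch; MY_NUM_TXQ, the error label, and the priv layout are assumptions:

static int my_probe(struct device *dev)
{
        struct net_device *net_dev;
        struct my_priv *priv;
        int err;

        net_dev = alloc_etherdev_mq(sizeof(*priv), MY_NUM_TXQ);
        if (!net_dev)
                return -ENOMEM;

        SET_NETDEV_DEV(net_dev, dev->parent);
        dev_set_drvdata(dev, net_dev);
        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;

        err = register_netdev(net_dev);
        if (err)
                goto err_free_netdev;

        return 0;

err_free_netdev:
        free_netdev(net_dev);
        return err;
}

static void my_remove(struct device *dev)
{
        struct net_device *net_dev = dev_get_drvdata(dev);

        unregister_netdev(net_dev);   /* stop new users first */
        /* ... NAPI teardown and queue draining go here ... */
        free_netdev(net_dev);         /* frees priv too */
}
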