Lines matching refs: txq

Every line referencing txq in the hinic driver's Tx path, one entry per match: the source line number, the matching code, the enclosing function, and (where the cross-referencer can tell) whether txq is a function argument or a local variable there.
76 void hinic_txq_clean_stats(struct hinic_txq *txq) in hinic_txq_clean_stats() argument
78 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_clean_stats()
95 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) in hinic_txq_get_stats() argument
97 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_get_stats()
117 static void txq_stats_init(struct hinic_txq *txq) in txq_stats_init() argument
119 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in txq_stats_init()
122 hinic_txq_clean_stats(txq); in txq_stats_init()
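
All three stats helpers above (hinic_txq_clean_stats(), hinic_txq_get_stats(), txq_stats_init()) revolve around the same per-queue u64_stats_sync discipline: txq_stats_init() initializes the syncp, writers bracket counter updates with u64_stats_update_begin()/u64_stats_update_end(), and readers retry their snapshot until no writer section raced them. A minimal reader-side sketch, assuming a hinic_txq_stats carrying just the pkts/bytes counters that appear elsewhere in this listing:

        #include <linux/u64_stats_sync.h>

        /* Reader side of the pattern behind hinic_txq_get_stats(): loop
         * until a consistent snapshot is read, i.e. no writer overlapped us.
         */
        static void txq_stats_snapshot(struct hinic_txq_stats *txq_stats,
                                       u64 *pkts, u64 *bytes)
        {
                unsigned int start;

                do {
                        start  = u64_stats_fetch_begin(&txq_stats->syncp);
                        *pkts  = txq_stats->pkts;
                        *bytes = txq_stats->bytes;
                } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
        }

On 64-bit kernels the fetch_begin/fetch_retry pair compiles away to nothing; the underlying seqcount only does work where a 64-bit counter cannot be read atomically.
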
500 struct hinic_txq *txq; in hinic_lb_xmit_frame() local
503 txq = &nic_dev->txqs[q_id]; in hinic_lb_xmit_frame()
504 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
507 err = tx_map_skb(nic_dev, skb, txq->sges); in hinic_lb_xmit_frame()
513 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
517 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
523 tx_unmap_skb(nic_dev, skb, txq->sges); in hinic_lb_xmit_frame()
525 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
526 txq->txq_stats.tx_busy++; in hinic_lb_xmit_frame()
527 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
534 hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); in hinic_lb_xmit_frame()
535 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_lb_xmit_frame()
540 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_lb_xmit_frame()
546 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
547 txq->txq_stats.tx_dropped++; in hinic_lb_xmit_frame()
548 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
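
The hinic_lb_xmit_frame() references above (the loopback selftest transmit path) show the ring-full handling that feeds the tx_busy counter: claim a WQE, and if none is left, stop the subqueue, retry once in case the completion side freed entries in the meantime, and only then give up. A helper-shaped sketch of that dance, reusing the listing's names but not the driver's actual factoring:

        /* Claim a send-queue WQE, stopping the subqueue when the ring is
         * full. Returns the WQE, or NULL with tx_busy counted.
         */
        static struct hinic_sq_wqe *get_tx_wqe(struct hinic_dev *nic_dev,
                                               struct hinic_txq *txq,
                                               struct hinic_qp *qp,
                                               unsigned int wqe_size,
                                               u16 *prod_idx)
        {
                struct hinic_sq_wqe *sq_wqe;

                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, prod_idx);
                if (sq_wqe)
                        return sq_wqe;

                /* Ring looks full: stop the subqueue, then retry once in
                 * case completions freed entries between check and stop.
                 */
                netif_stop_subqueue(nic_dev->netdev, qp->q_id);

                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                        return sq_wqe;
                }

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);

                return NULL;    /* caller unmaps skb, returns NETDEV_TX_BUSY */
        }

Returning NETDEV_TX_BUSY (after tx_unmap_skb(), line 523) asks the stack to requeue the skb; that is only safe because the subqueue was stopped first.
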
561 struct hinic_txq *txq; in hinic_xmit_frame() local
564 txq = &nic_dev->txqs[q_id]; in hinic_xmit_frame()
565 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_xmit_frame()
578 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_xmit_frame()
579 txq->txq_stats.big_frags_pkts++; in hinic_xmit_frame()
580 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_xmit_frame()
583 if (nr_sges > txq->max_sges) { in hinic_xmit_frame()
588 err = tx_map_skb(nic_dev, skb, txq->sges); in hinic_xmit_frame()
594 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
601 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
607 tx_unmap_skb(nic_dev, skb, txq->sges); in hinic_xmit_frame()
609 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_xmit_frame()
610 txq->txq_stats.tx_busy++; in hinic_xmit_frame()
611 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_xmit_frame()
618 hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); in hinic_xmit_frame()
624 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_xmit_frame()
629 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_xmit_frame()
634 hinic_sq_return_wqe(txq->sq, wqe_size); in hinic_xmit_frame()
635 tx_unmap_skb(nic_dev, skb, txq->sges); in hinic_xmit_frame()
641 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_xmit_frame()
642 txq->txq_stats.tx_dropped++; in hinic_xmit_frame()
643 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_xmit_frame()
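
hinic_xmit_frame() adds two things over the loopback path: the big_frags_pkts accounting and max_sges check at lines 578-583, and the unwind at lines 634-643 for a WQE that was claimed but cannot be sent (e.g. offload setup failed). The unwind order matters: return the WQE, undo the DMA mappings, free the skb, count the drop. Roughly, with the label names assumed:

        offload_error:                          /* label names assumed */
                hinic_sq_return_wqe(txq->sq, wqe_size); /* give slot back */
                tx_unmap_skb(nic_dev, skb, txq->sges);  /* undo DMA maps  */

        skb_error:
                dev_kfree_skb_any(skb);         /* safe from any context */

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_dropped++;
                u64_stats_update_end(&txq->txq_stats.syncp);

                return NETDEV_TX_OK;    /* the drop is final, never TX_BUSY */

Note the contrast with the tx_busy path: NETDEV_TX_BUSY makes the stack requeue the skb and is reserved for the transient full-ring case; a packet the hardware can never accept is dropped and reported as NETDEV_TX_OK.
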
666 static void free_all_tx_skbs(struct hinic_txq *txq) in free_all_tx_skbs() argument
668 struct hinic_dev *nic_dev = netdev_priv(txq->netdev); in free_all_tx_skbs()
669 struct hinic_sq *sq = txq->sq; in free_all_tx_skbs()
683 hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); in free_all_tx_skbs()
687 tx_free_skb(nic_dev, skb, txq->free_sges); in free_all_tx_skbs()
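
free_all_tx_skbs() walks the send queue at teardown, recovers each pending WQE's scatter-gather entries into txq->free_sges, and hands them to tx_free_skb(). A sketch of what that freeing amounts to, assuming (as in the real driver's common helpers) that hinic_sge_to_dma() recovers the DMA address stored in each SGE and that the skb head was mapped with dma_map_single() and each fragment with dma_map_page():

        /* Undo tx_map_skb(): unmap every fragment, then the linear head,
         * then release the skb itself.
         */
        static void tx_free_skb_sketch(struct pci_dev *pdev,
                                       struct sk_buff *skb,
                                       struct hinic_sge *sges)
        {
                int i;

                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                        dma_unmap_page(&pdev->dev,
                                       hinic_sge_to_dma(&sges[i + 1]),
                                       sges[i + 1].len, DMA_TO_DEVICE);

                dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]),
                                 sges[0].len, DMA_TO_DEVICE);

                dev_kfree_skb_any(skb);
        }
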
700 struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi); in free_tx_poll() local
701 struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); in free_tx_poll()
702 struct hinic_dev *nic_dev = netdev_priv(txq->netdev); in free_tx_poll()
704 struct hinic_sq *sq = txq->sq; in free_tx_poll()
738 hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); in free_tx_poll()
742 tx_free_skb(nic_dev, skb, txq->free_sges); in free_tx_poll()
747 netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); in free_tx_poll()
755 u64_stats_update_begin(&txq->txq_stats.syncp); in free_tx_poll()
756 txq->txq_stats.tx_wake++; in free_tx_poll()
757 u64_stats_update_end(&txq->txq_stats.syncp); in free_tx_poll()
760 u64_stats_update_begin(&txq->txq_stats.syncp); in free_tx_poll()
761 txq->txq_stats.bytes += tx_bytes; in free_tx_poll()
762 txq->txq_stats.pkts += pkts; in free_tx_poll()
763 u64_stats_update_end(&txq->txq_stats.syncp); in free_tx_poll()
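
free_tx_poll() is the NAPI completion poller: it reaps finished WQEs up to the NAPI budget, frees their skbs, wakes the netdev queue it had stopped (the tx_wake counter), and folds the whole batch into the byte/packet counters in one writer section. A condensed sketch of that shape; the reaping loop and the device-specific interrupt re-arm are elided:

        static int free_tx_poll_sketch(struct napi_struct *napi, int budget)
        {
                struct hinic_txq *txq = container_of(napi, struct hinic_txq,
                                                     napi);
                struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp,
                                                   sq);
                struct netdev_queue *netdev_txq;
                u64 tx_bytes = 0;
                int pkts = 0;

                /* ... reap up to `budget` completed WQEs here: pull each
                 * WQE's SGEs into txq->free_sges, tx_free_skb() the skb,
                 * and accumulate tx_bytes/pkts ...
                 */

                /* Freed ring space may unblock a stopped queue. */
                netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
                if (netif_tx_queue_stopped(netdev_txq)) {
                        netif_tx_wake_queue(netdev_txq);

                        u64_stats_update_begin(&txq->txq_stats.syncp);
                        txq->txq_stats.tx_wake++;
                        u64_stats_update_end(&txq->txq_stats.syncp);
                }

                /* One writer section per batch keeps reader retries cheap. */
                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.bytes += tx_bytes;
                txq->txq_stats.pkts  += pkts;
                u64_stats_update_end(&txq->txq_stats.syncp);

                if (pkts < budget) {
                        napi_complete(napi);
                        /* device specific: re-arm the SQ's MSI-X vector */
                        return pkts;
                }
                return budget;  /* more pending: stay on the poll list */
        }
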
780 struct hinic_txq *txq = data; in tx_irq() local
783 nic_dev = netdev_priv(txq->netdev); in tx_irq()
788 txq->sq->msix_entry, in tx_irq()
791 hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); in tx_irq()
793 napi_schedule(&txq->napi); in tx_irq()
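
tx_irq() is the hard-IRQ half: the listed msix_entry and hinic_hwdev_msix_cnt_set() calls quiesce and reset the MSI-X vector, and all real work is deferred to the poller. Stripped of the device-specific masking, the shape is the standard split-handler pattern:

        static irqreturn_t tx_irq_sketch(int irq, void *data)
        {
                struct hinic_txq *txq = data;   /* cookie from request_irq() */

                /* Device specific (hinic_hwdev_msix_cnt_set in the listing):
                 * mask the vector and reset its event count so it stays
                 * quiet while NAPI polls.
                 */

                napi_schedule(&txq->napi);      /* reap in softirq context */
                return IRQ_HANDLED;
        }
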
797 static int tx_request_irq(struct hinic_txq *txq) in tx_request_irq() argument
799 struct hinic_dev *nic_dev = netdev_priv(txq->netdev); in tx_request_irq()
805 struct hinic_sq *sq = txq->sq; in tx_request_irq()
811 netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight); in tx_request_irq()
826 netif_err(nic_dev, drv, txq->netdev, in tx_request_irq()
828 netif_napi_del(&txq->napi); in tx_request_irq()
832 err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); in tx_request_irq()
835 netif_napi_del(&txq->napi); in tx_request_irq()
842 static void tx_free_irq(struct hinic_txq *txq) in tx_free_irq() argument
844 struct hinic_sq *sq = txq->sq; in tx_free_irq()
846 free_irq(sq->irq, txq); in tx_free_irq()
847 netif_napi_del(&txq->napi); in tx_free_irq()
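
tx_request_irq() and tx_free_irq() bracket the interrupt's lifetime, and their ordering is the point: NAPI must be registered before the IRQ can fire (the handler schedules it immediately), and the IRQ must be freed before the NAPI context is deleted. A sketch keeping the listing's names; the device-configuration step between the two calls (whose failure path is the netif_err()/netif_napi_del() pair at lines 826-828) is elided:

        static int tx_request_irq_sketch(struct hinic_txq *txq)
        {
                struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
                struct hinic_sq *sq = txq->sq;
                int err;

                /* Register the poller first: tx_irq() may run as soon as
                 * request_irq() returns and will napi_schedule() at once.
                 */
                netif_napi_add(txq->netdev, &txq->napi, free_tx_poll,
                               nic_dev->tx_weight);

                err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
                if (err) {
                        netif_napi_del(&txq->napi); /* unwind, reverse order */
                        return err;
                }
                return 0;
        }

        static void tx_free_irq_sketch(struct hinic_txq *txq)
        {
                free_irq(txq->sq->irq, txq);    /* no new scheduling after */
                netif_napi_del(&txq->napi);
        }
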
858 int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, in hinic_init_txq() argument
867 txq->netdev = netdev; in hinic_init_txq()
868 txq->sq = sq; in hinic_init_txq()
870 txq_stats_init(txq); in hinic_init_txq()
872 txq->max_sges = HINIC_MAX_SQ_BUFDESCS; in hinic_init_txq()
874 sges_size = txq->max_sges * sizeof(*txq->sges); in hinic_init_txq()
875 txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); in hinic_init_txq()
876 if (!txq->sges) in hinic_init_txq()
879 sges_size = txq->max_sges * sizeof(*txq->free_sges); in hinic_init_txq()
880 txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); in hinic_init_txq()
881 if (!txq->free_sges) { in hinic_init_txq()
887 txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); in hinic_init_txq()
888 if (!txq->irq_name) { in hinic_init_txq()
893 sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id); in hinic_init_txq()
900 err = tx_request_irq(txq); in hinic_init_txq()
910 devm_kfree(&netdev->dev, txq->irq_name); in hinic_init_txq()
913 devm_kfree(&netdev->dev, txq->free_sges); in hinic_init_txq()
916 devm_kfree(&netdev->dev, txq->sges); in hinic_init_txq()
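
Taken together, lines 858-916 are a devm allocation ladder: three per-queue buffers obtained with devm_kzalloc(), each failure unwinding exactly what was already allocated. A condensed reconstruction; the label names and the snprintf-based sizing of irqname_len are assumptions, and steps that never touch txq do not appear in this listing and are elided:

        int hinic_init_txq_sketch(struct hinic_txq *txq, struct hinic_sq *sq,
                                  struct net_device *netdev)
        {
                struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
                size_t sges_size;
                int irqname_len, err;

                txq->netdev = netdev;
                txq->sq = sq;
                txq_stats_init(txq);
                txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

                sges_size = txq->max_sges * sizeof(*txq->sges);
                txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
                if (!txq->sges)
                        return -ENOMEM;

                sges_size = txq->max_sges * sizeof(*txq->free_sges);
                txq->free_sges = devm_kzalloc(&netdev->dev, sges_size,
                                              GFP_KERNEL);
                if (!txq->free_sges) {
                        err = -ENOMEM;
                        goto err_alloc_free_sges;
                }

                /* snprintf(NULL, 0, ...) sizes the buffer for the name
                 * printed at line 893 without writing anything.
                 */
                irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name,
                                       qp->q_id) + 1;
                txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len,
                                             GFP_KERNEL);
                if (!txq->irq_name) {
                        err = -ENOMEM;
                        goto err_alloc_irqname;
                }
                sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);

                err = tx_request_irq(txq);
                if (err)
                        goto err_req_tx_irq;

                return 0;

        err_req_tx_irq:
                devm_kfree(&netdev->dev, txq->irq_name);
        err_alloc_irqname:
                devm_kfree(&netdev->dev, txq->free_sges);
        err_alloc_free_sges:
                devm_kfree(&netdev->dev, txq->sges);
                return err;
        }
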
924 void hinic_clean_txq(struct hinic_txq *txq) in hinic_clean_txq() argument
926 struct net_device *netdev = txq->netdev; in hinic_clean_txq()
928 tx_free_irq(txq); in hinic_clean_txq()
930 free_all_tx_skbs(txq); in hinic_clean_txq()
932 devm_kfree(&netdev->dev, txq->irq_name); in hinic_clean_txq()
933 devm_kfree(&netdev->dev, txq->free_sges); in hinic_clean_txq()
934 devm_kfree(&netdev->dev, txq->sges); in hinic_clean_txq()
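
hinic_clean_txq() is the mirror of that setup, and the ordering is the whole point, as annotated in this sketch of the lines above:

        void hinic_clean_txq_sketch(struct hinic_txq *txq)
        {
                struct net_device *netdev = txq->netdev;

                tx_free_irq(txq);       /* 1. stop completions: nothing can
                                         *    touch the SQ concurrently now */
                free_all_tx_skbs(txq);  /* 2. safe to drain in-flight skbs  */

                devm_kfree(&netdev->dev, txq->irq_name);  /* 3. only then  */
                devm_kfree(&netdev->dev, txq->free_sges); /*    release the */
                devm_kfree(&netdev->dev, txq->sges);      /*    bookkeeping */
        }
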