Lines Matching refs:priv

35 struct gve_priv *priv = netdev_priv(dev); in gve_start_xmit() local
37 if (gve_is_gqi(priv)) in gve_start_xmit()
45 struct gve_priv *priv = netdev_priv(dev); in gve_get_stats() local
50 if (priv->rx) { in gve_get_stats()
51 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { in gve_get_stats()
54 u64_stats_fetch_begin(&priv->rx[ring].statss); in gve_get_stats()
55 packets = priv->rx[ring].rpackets; in gve_get_stats()
56 bytes = priv->rx[ring].rbytes; in gve_get_stats()
57 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, in gve_get_stats()
63 if (priv->tx) { in gve_get_stats()
64 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { in gve_get_stats()
67 u64_stats_fetch_begin(&priv->tx[ring].statss); in gve_get_stats()
68 packets = priv->tx[ring].pkt_done; in gve_get_stats()
69 bytes = priv->tx[ring].bytes_done; in gve_get_stats()
70 } while (u64_stats_fetch_retry(&priv->tx[ring].statss, in gve_get_stats()
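
The RX loop above is cut off by the per-line matching; for orientation, here is a minimal sketch of the u64_stats_sync read pattern those lines follow, assuming the ring fields shown in the matches (statss, rpackets, rbytes) and the gve_rx_ring type from the driver's gve.h:

        /* Read one RX ring's counters consistently against concurrent writers. */
        static void example_read_rx_ring(const struct gve_rx_ring *rx,
                                         u64 *packets, u64 *bytes)
        {
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin(&rx->statss);
                        *packets = rx->rpackets;
                        *bytes = rx->rbytes;
                } while (u64_stats_fetch_retry(&rx->statss, start));
        }

The loop simply retries the reads if a writer updated the ring statistics in between, so no lock is taken on the hot path.
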
78 static int gve_alloc_counter_array(struct gve_priv *priv) in gve_alloc_counter_array() argument
80 priv->counter_array = in gve_alloc_counter_array()
81 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_counter_array()
82 priv->num_event_counters * in gve_alloc_counter_array()
83 sizeof(*priv->counter_array), in gve_alloc_counter_array()
84 &priv->counter_array_bus, GFP_KERNEL); in gve_alloc_counter_array()
85 if (!priv->counter_array) in gve_alloc_counter_array()
91 static void gve_free_counter_array(struct gve_priv *priv) in gve_free_counter_array() argument
93 if (!priv->counter_array) in gve_free_counter_array()
96 dma_free_coherent(&priv->pdev->dev, in gve_free_counter_array()
97 priv->num_event_counters * in gve_free_counter_array()
98 sizeof(*priv->counter_array), in gve_free_counter_array()
99 priv->counter_array, priv->counter_array_bus); in gve_free_counter_array()
100 priv->counter_array = NULL; in gve_free_counter_array()
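
The counter-array helpers above pair dma_alloc_coherent() with dma_free_coherent(); a condensed sketch, assuming the fields shown in the matches and an -ENOMEM return on allocation failure:

        static int example_alloc_counter_array(struct gve_priv *priv)
        {
                priv->counter_array =
                        dma_alloc_coherent(&priv->pdev->dev,
                                           priv->num_event_counters *
                                           sizeof(*priv->counter_array),
                                           &priv->counter_array_bus, GFP_KERNEL);
                return priv->counter_array ? 0 : -ENOMEM;
        }

        static void example_free_counter_array(struct gve_priv *priv)
        {
                if (!priv->counter_array)
                        return;

                dma_free_coherent(&priv->pdev->dev,
                                  priv->num_event_counters *
                                  sizeof(*priv->counter_array),
                                  priv->counter_array, priv->counter_array_bus);
                priv->counter_array = NULL;
        }
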
106 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_stats_report_task() local
108 if (gve_get_do_report_stats(priv)) { in gve_stats_report_task()
109 gve_handle_report_stats(priv); in gve_stats_report_task()
110 gve_clear_do_report_stats(priv); in gve_stats_report_task()
114 static void gve_stats_report_schedule(struct gve_priv *priv) in gve_stats_report_schedule() argument
116 if (!gve_get_probe_in_progress(priv) && in gve_stats_report_schedule()
117 !gve_get_reset_in_progress(priv)) { in gve_stats_report_schedule()
118 gve_set_do_report_stats(priv); in gve_stats_report_schedule()
119 queue_work(priv->gve_wq, &priv->stats_report_task); in gve_stats_report_schedule()
125 struct gve_priv *priv = from_timer(priv, t, stats_report_timer); in gve_stats_report_timer() local
127 mod_timer(&priv->stats_report_timer, in gve_stats_report_timer()
129 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_stats_report_timer()
130 gve_stats_report_schedule(priv); in gve_stats_report_timer()
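
The stats-report path above chains a timer into a workqueue item: the timer callback re-arms itself and schedules the work, and the work handler performs the actual report. A sketch of that chain, assuming the field and helper names shown in the matched lines:

        /* Timer callback: recover the owning priv with from_timer(), re-arm
         * the timer, then defer the real work to the driver workqueue. */
        static void example_stats_report_timer(struct timer_list *t)
        {
                struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

                mod_timer(&priv->stats_report_timer,
                          round_jiffies(jiffies +
                          msecs_to_jiffies(priv->stats_report_timer_period)));
                gve_stats_report_schedule(priv);
        }

        /* Work handler: recover priv with container_of() and act on the flag
         * set by the scheduler above. */
        static void example_stats_report_task(struct work_struct *work)
        {
                struct gve_priv *priv = container_of(work, struct gve_priv,
                                                     stats_report_task);

                if (gve_get_do_report_stats(priv)) {
                        gve_handle_report_stats(priv);
                        gve_clear_do_report_stats(priv);
                }
        }
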
133 static int gve_alloc_stats_report(struct gve_priv *priv) in gve_alloc_stats_report() argument
138 priv->tx_cfg.num_queues; in gve_alloc_stats_report()
140 priv->rx_cfg.num_queues; in gve_alloc_stats_report()
141 priv->stats_report_len = struct_size(priv->stats_report, stats, in gve_alloc_stats_report()
143 priv->stats_report = in gve_alloc_stats_report()
144 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_alloc_stats_report()
145 &priv->stats_report_bus, GFP_KERNEL); in gve_alloc_stats_report()
146 if (!priv->stats_report) in gve_alloc_stats_report()
149 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0); in gve_alloc_stats_report()
150 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD; in gve_alloc_stats_report()
154 static void gve_free_stats_report(struct gve_priv *priv) in gve_free_stats_report() argument
156 if (!priv->stats_report) in gve_free_stats_report()
159 del_timer_sync(&priv->stats_report_timer); in gve_free_stats_report()
160 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_free_stats_report()
161 priv->stats_report, priv->stats_report_bus); in gve_free_stats_report()
162 priv->stats_report = NULL; in gve_free_stats_report()
167 struct gve_priv *priv = arg; in gve_mgmnt_intr() local
169 queue_work(priv->gve_wq, &priv->service_task); in gve_mgmnt_intr()
176 struct gve_priv *priv = block->priv; in gve_intr() local
178 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_intr()
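
gve_intr() above only shows the doorbell write; in the usual shape of this handler the vector is masked and NAPI is scheduled before returning. A generic sketch, where the napi_schedule_irqoff() call is an assumption rather than a quote from the file:

        static irqreturn_t example_intr(int irq, void *arg)
        {
                struct gve_notify_block *block = arg;
                struct gve_priv *priv = block->priv;

                /* Mask this vector via its doorbell, then let NAPI do the work. */
                iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
                napi_schedule_irqoff(&block->napi);
                return IRQ_HANDLED;
        }
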
197 struct gve_priv *priv; in gve_napi_poll() local
201 priv = block->priv; in gve_napi_poll()
215 irq_doorbell = gve_irq_doorbell(priv, block); in gve_napi_poll()
224 reschedule |= gve_tx_clean_pending(priv, block->tx); in gve_napi_poll()
238 struct gve_priv *priv = block->priv; in gve_napi_poll_dqo() local
252 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
275 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
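
The gve_napi_poll()/gve_napi_poll_dqo() matches hide most of the poll loop. The sketch below shows the typical structure those lines imply (clean TX, poll RX against the budget, unmask the doorbell only once NAPI completes); gve_tx_poll() and gve_rx_poll() are hypothetical helper names, not quotes from the file:

        static int example_napi_poll(struct napi_struct *napi, int budget)
        {
                struct gve_notify_block *block =
                        container_of(napi, struct gve_notify_block, napi);
                struct gve_priv *priv = block->priv;
                bool reschedule = false;
                int work_done = 0;

                if (block->tx)
                        reschedule |= gve_tx_poll(block, budget);
                if (block->rx) {
                        work_done = gve_rx_poll(block, budget);
                        reschedule |= work_done == budget;
                }

                if (reschedule)
                        return budget;  /* stay scheduled */

                /* Done for now: complete NAPI and unmask the vector's doorbell. */
                if (napi_complete_done(napi, work_done))
                        iowrite32be(0, gve_irq_doorbell(priv, block));
                return work_done;
        }
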
282 static int gve_alloc_notify_blocks(struct gve_priv *priv) in gve_alloc_notify_blocks() argument
284 int num_vecs_requested = priv->num_ntfy_blks + 1; in gve_alloc_notify_blocks()
285 char *name = priv->dev->name; in gve_alloc_notify_blocks()
291 priv->msix_vectors = kvcalloc(num_vecs_requested, in gve_alloc_notify_blocks()
292 sizeof(*priv->msix_vectors), GFP_KERNEL); in gve_alloc_notify_blocks()
293 if (!priv->msix_vectors) in gve_alloc_notify_blocks()
296 priv->msix_vectors[i].entry = i; in gve_alloc_notify_blocks()
297 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, in gve_alloc_notify_blocks()
300 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", in gve_alloc_notify_blocks()
310 priv->num_ntfy_blks = new_num_ntfy_blks; in gve_alloc_notify_blocks()
311 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_alloc_notify_blocks()
312 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
314 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, in gve_alloc_notify_blocks()
316 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
318 vecs_enabled, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
319 priv->rx_cfg.max_queues); in gve_alloc_notify_blocks()
320 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
321 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
322 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
323 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
326 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); in gve_alloc_notify_blocks()
329 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt", in gve_alloc_notify_blocks()
331 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, in gve_alloc_notify_blocks()
332 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); in gve_alloc_notify_blocks()
334 dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); in gve_alloc_notify_blocks()
337 priv->ntfy_blocks = in gve_alloc_notify_blocks()
338 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_notify_blocks()
339 priv->num_ntfy_blks * in gve_alloc_notify_blocks()
340 sizeof(*priv->ntfy_blocks), in gve_alloc_notify_blocks()
341 &priv->ntfy_block_bus, GFP_KERNEL); in gve_alloc_notify_blocks()
342 if (!priv->ntfy_blocks) { in gve_alloc_notify_blocks()
347 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_alloc_notify_blocks()
348 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_alloc_notify_blocks()
353 block->priv = priv; in gve_alloc_notify_blocks()
354 err = request_irq(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
355 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, in gve_alloc_notify_blocks()
358 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
362 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
368 struct gve_notify_block *block = &priv->ntfy_blocks[j]; in gve_alloc_notify_blocks()
371 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
373 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_alloc_notify_blocks()
375 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_alloc_notify_blocks()
376 sizeof(*priv->ntfy_blocks), in gve_alloc_notify_blocks()
377 priv->ntfy_blocks, priv->ntfy_block_bus); in gve_alloc_notify_blocks()
378 priv->ntfy_blocks = NULL; in gve_alloc_notify_blocks()
380 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_alloc_notify_blocks()
382 pci_disable_msix(priv->pdev); in gve_alloc_notify_blocks()
384 kvfree(priv->msix_vectors); in gve_alloc_notify_blocks()
385 priv->msix_vectors = NULL; in gve_alloc_notify_blocks()
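
gve_alloc_notify_blocks() above is mostly MSI-X plumbing. A condensed sketch of just the enable step, leaving out the per-vector request_irq() loop and the unwind labels; the three-vector minimum (management plus one TX and one RX) is an assumption:

        static int example_enable_msix(struct gve_priv *priv)
        {
                int num_vecs_requested = priv->num_ntfy_blks + 1; /* + management */
                int vecs_enabled, i;

                priv->msix_vectors = kvcalloc(num_vecs_requested,
                                              sizeof(*priv->msix_vectors),
                                              GFP_KERNEL);
                if (!priv->msix_vectors)
                        return -ENOMEM;
                for (i = 0; i < num_vecs_requested; i++)
                        priv->msix_vectors[i].entry = i;

                /* Ask for everything, accept anything down to the assumed minimum. */
                vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
                                                     3, num_vecs_requested);
                if (vecs_enabled < 0) {
                        kvfree(priv->msix_vectors);
                        priv->msix_vectors = NULL;
                        return vecs_enabled;
                }
                return vecs_enabled;
        }

If fewer vectors than requested come back, the driver shrinks num_ntfy_blks and the TX/RX queue maxima accordingly, as the matched lines around gve_alloc_notify_blocks() show.
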
389 static void gve_free_notify_blocks(struct gve_priv *priv) in gve_free_notify_blocks() argument
393 if (!priv->msix_vectors) in gve_free_notify_blocks()
397 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_free_notify_blocks()
398 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_free_notify_blocks()
401 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_free_notify_blocks()
403 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_free_notify_blocks()
405 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_free_notify_blocks()
406 dma_free_coherent(&priv->pdev->dev, in gve_free_notify_blocks()
407 priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks), in gve_free_notify_blocks()
408 priv->ntfy_blocks, priv->ntfy_block_bus); in gve_free_notify_blocks()
409 priv->ntfy_blocks = NULL; in gve_free_notify_blocks()
410 pci_disable_msix(priv->pdev); in gve_free_notify_blocks()
411 kvfree(priv->msix_vectors); in gve_free_notify_blocks()
412 priv->msix_vectors = NULL; in gve_free_notify_blocks()
415 static int gve_setup_device_resources(struct gve_priv *priv) in gve_setup_device_resources() argument
419 err = gve_alloc_counter_array(priv); in gve_setup_device_resources()
422 err = gve_alloc_notify_blocks(priv); in gve_setup_device_resources()
425 err = gve_alloc_stats_report(priv); in gve_setup_device_resources()
428 err = gve_adminq_configure_device_resources(priv, in gve_setup_device_resources()
429 priv->counter_array_bus, in gve_setup_device_resources()
430 priv->num_event_counters, in gve_setup_device_resources()
431 priv->ntfy_block_bus, in gve_setup_device_resources()
432 priv->num_ntfy_blks); in gve_setup_device_resources()
434 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
440 if (priv->queue_format == GVE_DQO_RDA_FORMAT) { in gve_setup_device_resources()
441 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), in gve_setup_device_resources()
443 if (!priv->ptype_lut_dqo) { in gve_setup_device_resources()
447 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); in gve_setup_device_resources()
449 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
455 err = gve_adminq_report_stats(priv, priv->stats_report_len, in gve_setup_device_resources()
456 priv->stats_report_bus, in gve_setup_device_resources()
459 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
461 gve_set_device_resources_ok(priv); in gve_setup_device_resources()
465 kvfree(priv->ptype_lut_dqo); in gve_setup_device_resources()
466 priv->ptype_lut_dqo = NULL; in gve_setup_device_resources()
468 gve_free_stats_report(priv); in gve_setup_device_resources()
470 gve_free_notify_blocks(priv); in gve_setup_device_resources()
472 gve_free_counter_array(priv); in gve_setup_device_resources()
477 static void gve_trigger_reset(struct gve_priv *priv);
479 static void gve_teardown_device_resources(struct gve_priv *priv) in gve_teardown_device_resources() argument
484 if (gve_get_device_resources_ok(priv)) { in gve_teardown_device_resources()
486 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); in gve_teardown_device_resources()
488 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
490 gve_trigger_reset(priv); in gve_teardown_device_resources()
492 err = gve_adminq_deconfigure_device_resources(priv); in gve_teardown_device_resources()
494 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
497 gve_trigger_reset(priv); in gve_teardown_device_resources()
501 kvfree(priv->ptype_lut_dqo); in gve_teardown_device_resources()
502 priv->ptype_lut_dqo = NULL; in gve_teardown_device_resources()
504 gve_free_counter_array(priv); in gve_teardown_device_resources()
505 gve_free_notify_blocks(priv); in gve_teardown_device_resources()
506 gve_free_stats_report(priv); in gve_teardown_device_resources()
507 gve_clear_device_resources_ok(priv); in gve_teardown_device_resources()
510 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx, in gve_add_napi() argument
513 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_add_napi()
515 netif_napi_add(priv->dev, &block->napi, gve_poll, in gve_add_napi()
519 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) in gve_remove_napi() argument
521 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_remove_napi()
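
The gve_add_napi()/gve_remove_napi() matches cut off mid-call; a sketch of the pair, assuming the older four-argument netif_napi_add() with NAPI_POLL_WEIGHT that the truncated line implies (newer kernels drop the weight argument):

        static void example_add_napi(struct gve_priv *priv, int ntfy_idx,
                                     int (*poll)(struct napi_struct *, int))
        {
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                netif_napi_add(priv->dev, &block->napi, poll, NAPI_POLL_WEIGHT);
        }

        static void example_remove_napi(struct gve_priv *priv, int ntfy_idx)
        {
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                netif_napi_del(&block->napi);
        }
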
526 static int gve_register_qpls(struct gve_priv *priv) in gve_register_qpls() argument
528 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_register_qpls()
533 err = gve_adminq_register_page_list(priv, &priv->qpls[i]); in gve_register_qpls()
535 netif_err(priv, drv, priv->dev, in gve_register_qpls()
537 priv->qpls[i].id); in gve_register_qpls()
547 static int gve_unregister_qpls(struct gve_priv *priv) in gve_unregister_qpls() argument
549 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_unregister_qpls()
554 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); in gve_unregister_qpls()
557 netif_err(priv, drv, priv->dev, in gve_unregister_qpls()
559 priv->qpls[i].id); in gve_unregister_qpls()
566 static int gve_create_rings(struct gve_priv *priv) in gve_create_rings() argument
571 err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues); in gve_create_rings()
573 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", in gve_create_rings()
574 priv->tx_cfg.num_queues); in gve_create_rings()
580 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", in gve_create_rings()
581 priv->tx_cfg.num_queues); in gve_create_rings()
583 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); in gve_create_rings()
585 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", in gve_create_rings()
586 priv->rx_cfg.num_queues); in gve_create_rings()
592 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n", in gve_create_rings()
593 priv->rx_cfg.num_queues); in gve_create_rings()
595 if (gve_is_gqi(priv)) { in gve_create_rings()
602 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_create_rings()
603 gve_rx_write_doorbell(priv, &priv->rx[i]); in gve_create_rings()
605 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_create_rings()
607 gve_rx_post_buffers_dqo(&priv->rx[i]); in gve_create_rings()
614 static void add_napi_init_sync_stats(struct gve_priv *priv, in add_napi_init_sync_stats() argument
621 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in add_napi_init_sync_stats()
622 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in add_napi_init_sync_stats()
624 u64_stats_init(&priv->tx[i].statss); in add_napi_init_sync_stats()
625 priv->tx[i].ntfy_id = ntfy_idx; in add_napi_init_sync_stats()
626 gve_add_napi(priv, ntfy_idx, napi_poll); in add_napi_init_sync_stats()
629 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in add_napi_init_sync_stats()
630 int ntfy_idx = gve_rx_idx_to_ntfy(priv, i); in add_napi_init_sync_stats()
632 u64_stats_init(&priv->rx[i].statss); in add_napi_init_sync_stats()
633 priv->rx[i].ntfy_id = ntfy_idx; in add_napi_init_sync_stats()
634 gve_add_napi(priv, ntfy_idx, napi_poll); in add_napi_init_sync_stats()
638 static void gve_tx_free_rings(struct gve_priv *priv) in gve_tx_free_rings() argument
640 if (gve_is_gqi(priv)) { in gve_tx_free_rings()
641 gve_tx_free_rings_gqi(priv); in gve_tx_free_rings()
643 gve_tx_free_rings_dqo(priv); in gve_tx_free_rings()
647 static int gve_alloc_rings(struct gve_priv *priv) in gve_alloc_rings() argument
652 priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx), in gve_alloc_rings()
654 if (!priv->tx) in gve_alloc_rings()
657 if (gve_is_gqi(priv)) in gve_alloc_rings()
658 err = gve_tx_alloc_rings(priv); in gve_alloc_rings()
660 err = gve_tx_alloc_rings_dqo(priv); in gve_alloc_rings()
665 priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx), in gve_alloc_rings()
667 if (!priv->rx) { in gve_alloc_rings()
672 if (gve_is_gqi(priv)) in gve_alloc_rings()
673 err = gve_rx_alloc_rings(priv); in gve_alloc_rings()
675 err = gve_rx_alloc_rings_dqo(priv); in gve_alloc_rings()
679 if (gve_is_gqi(priv)) in gve_alloc_rings()
680 add_napi_init_sync_stats(priv, gve_napi_poll); in gve_alloc_rings()
682 add_napi_init_sync_stats(priv, gve_napi_poll_dqo); in gve_alloc_rings()
687 kvfree(priv->rx); in gve_alloc_rings()
688 priv->rx = NULL; in gve_alloc_rings()
690 gve_tx_free_rings(priv); in gve_alloc_rings()
692 kvfree(priv->tx); in gve_alloc_rings()
693 priv->tx = NULL; in gve_alloc_rings()
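
gve_alloc_rings() above follows the usual allocate-then-unwind ordering: TX array, TX rings, RX array, RX rings, then NAPI/statistics setup, with each failure path releasing what was already allocated. A condensed sketch, where the goto labels are assumptions and the helper names come from the matched lines:

        static int example_alloc_rings(struct gve_priv *priv)
        {
                int err;

                priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
                                    GFP_KERNEL);
                if (!priv->tx)
                        return -ENOMEM;
                err = gve_is_gqi(priv) ? gve_tx_alloc_rings(priv) :
                                         gve_tx_alloc_rings_dqo(priv);
                if (err)
                        goto free_tx;

                priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
                                    GFP_KERNEL);
                if (!priv->rx) {
                        err = -ENOMEM;
                        goto free_tx_rings;
                }
                err = gve_is_gqi(priv) ? gve_rx_alloc_rings(priv) :
                                         gve_rx_alloc_rings_dqo(priv);
                if (err)
                        goto free_rx;

                add_napi_init_sync_stats(priv, gve_is_gqi(priv) ? gve_napi_poll :
                                                                  gve_napi_poll_dqo);
                return 0;

        free_rx:
                kvfree(priv->rx);
                priv->rx = NULL;
        free_tx_rings:
                gve_tx_free_rings(priv);
        free_tx:
                kvfree(priv->tx);
                priv->tx = NULL;
                return err;
        }
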
697 static int gve_destroy_rings(struct gve_priv *priv) in gve_destroy_rings() argument
701 err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues); in gve_destroy_rings()
703 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
708 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); in gve_destroy_rings()
709 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); in gve_destroy_rings()
711 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
716 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); in gve_destroy_rings()
720 static void gve_rx_free_rings(struct gve_priv *priv) in gve_rx_free_rings() argument
722 if (gve_is_gqi(priv)) in gve_rx_free_rings()
723 gve_rx_free_rings_gqi(priv); in gve_rx_free_rings()
725 gve_rx_free_rings_dqo(priv); in gve_rx_free_rings()
728 static void gve_free_rings(struct gve_priv *priv) in gve_free_rings() argument
733 if (priv->tx) { in gve_free_rings()
734 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in gve_free_rings()
735 ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in gve_free_rings()
736 gve_remove_napi(priv, ntfy_idx); in gve_free_rings()
738 gve_tx_free_rings(priv); in gve_free_rings()
739 kvfree(priv->tx); in gve_free_rings()
740 priv->tx = NULL; in gve_free_rings()
742 if (priv->rx) { in gve_free_rings()
743 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_free_rings()
744 ntfy_idx = gve_rx_idx_to_ntfy(priv, i); in gve_free_rings()
745 gve_remove_napi(priv, ntfy_idx); in gve_free_rings()
747 gve_rx_free_rings(priv); in gve_free_rings()
748 kvfree(priv->rx); in gve_free_rings()
749 priv->rx = NULL; in gve_free_rings()
753 int gve_alloc_page(struct gve_priv *priv, struct device *dev, in gve_alloc_page() argument
759 priv->page_alloc_fail++; in gve_alloc_page()
764 priv->dma_mapping_error++; in gve_alloc_page()
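
Only the failure counters of gve_alloc_page() appear in the matches; a sketch of the allocate-and-map step they guard, where the exact signature, gfp flags and cleanup on mapping failure are assumptions:

        static int example_alloc_page(struct gve_priv *priv, struct device *dev,
                                      struct page **page, dma_addr_t *dma,
                                      enum dma_data_direction dir)
        {
                *page = alloc_page(GFP_KERNEL);
                if (!*page) {
                        priv->page_alloc_fail++;
                        return -ENOMEM;
                }
                *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
                if (dma_mapping_error(dev, *dma)) {
                        priv->dma_mapping_error++;
                        put_page(*page);
                        return -ENOMEM;
                }
                return 0;
        }
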
771 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, in gve_alloc_queue_page_list() argument
774 struct gve_queue_page_list *qpl = &priv->qpls[id]; in gve_alloc_queue_page_list()
778 if (pages + priv->num_registered_pages > priv->max_registered_pages) { in gve_alloc_queue_page_list()
779 netif_err(priv, drv, priv->dev, in gve_alloc_queue_page_list()
781 pages + priv->num_registered_pages, in gve_alloc_queue_page_list()
782 priv->max_registered_pages); in gve_alloc_queue_page_list()
798 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], in gve_alloc_queue_page_list()
800 gve_qpl_dma_dir(priv, id)); in gve_alloc_queue_page_list()
806 priv->num_registered_pages += pages; in gve_alloc_queue_page_list()
820 static void gve_free_queue_page_list(struct gve_priv *priv, in gve_free_queue_page_list() argument
823 struct gve_queue_page_list *qpl = &priv->qpls[id]; in gve_free_queue_page_list()
832 gve_free_page(&priv->pdev->dev, qpl->pages[i], in gve_free_queue_page_list()
833 qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); in gve_free_queue_page_list()
838 priv->num_registered_pages -= qpl->num_entries; in gve_free_queue_page_list()
841 static int gve_alloc_qpls(struct gve_priv *priv) in gve_alloc_qpls() argument
843 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_alloc_qpls()
848 if (priv->queue_format == GVE_GQI_RDA_FORMAT) in gve_alloc_qpls()
851 priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); in gve_alloc_qpls()
852 if (!priv->qpls) in gve_alloc_qpls()
855 for (i = 0; i < gve_num_tx_qpls(priv); i++) { in gve_alloc_qpls()
856 err = gve_alloc_queue_page_list(priv, i, in gve_alloc_qpls()
857 priv->tx_pages_per_qpl); in gve_alloc_qpls()
862 err = gve_alloc_queue_page_list(priv, i, in gve_alloc_qpls()
863 priv->rx_data_slot_cnt); in gve_alloc_qpls()
868 priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * in gve_alloc_qpls()
870 priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls), in gve_alloc_qpls()
872 if (!priv->qpl_cfg.qpl_id_map) { in gve_alloc_qpls()
881 gve_free_queue_page_list(priv, j); in gve_alloc_qpls()
882 kvfree(priv->qpls); in gve_alloc_qpls()
886 static void gve_free_qpls(struct gve_priv *priv) in gve_free_qpls() argument
888 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_free_qpls()
892 if (priv->queue_format == GVE_GQI_RDA_FORMAT) in gve_free_qpls()
895 kvfree(priv->qpl_cfg.qpl_id_map); in gve_free_qpls()
898 gve_free_queue_page_list(priv, i); in gve_free_qpls()
900 kvfree(priv->qpls); in gve_free_qpls()
907 void gve_schedule_reset(struct gve_priv *priv) in gve_schedule_reset() argument
909 gve_set_do_reset(priv); in gve_schedule_reset()
910 queue_work(priv->gve_wq, &priv->service_task); in gve_schedule_reset()
913 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
914 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
915 static void gve_turndown(struct gve_priv *priv);
916 static void gve_turnup(struct gve_priv *priv);
920 struct gve_priv *priv = netdev_priv(dev); in gve_open() local
923 err = gve_alloc_qpls(priv); in gve_open()
927 err = gve_alloc_rings(priv); in gve_open()
931 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); in gve_open()
934 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues); in gve_open()
938 err = gve_register_qpls(priv); in gve_open()
942 if (!gve_is_gqi(priv)) { in gve_open()
946 priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; in gve_open()
948 err = gve_create_rings(priv); in gve_open()
952 gve_set_device_rings_ok(priv); in gve_open()
954 if (gve_get_report_stats(priv)) in gve_open()
955 mod_timer(&priv->stats_report_timer, in gve_open()
957 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_open()
959 gve_turnup(priv); in gve_open()
960 queue_work(priv->gve_wq, &priv->service_task); in gve_open()
961 priv->interface_up_cnt++; in gve_open()
965 gve_free_rings(priv); in gve_open()
967 gve_free_qpls(priv); in gve_open()
974 if (gve_get_reset_in_progress(priv)) in gve_open()
977 gve_reset_and_teardown(priv, true); in gve_open()
979 gve_reset_recovery(priv, false); in gve_open()
986 struct gve_priv *priv = netdev_priv(dev); in gve_close() local
990 if (gve_get_device_rings_ok(priv)) { in gve_close()
991 gve_turndown(priv); in gve_close()
992 err = gve_destroy_rings(priv); in gve_close()
995 err = gve_unregister_qpls(priv); in gve_close()
998 gve_clear_device_rings_ok(priv); in gve_close()
1000 del_timer_sync(&priv->stats_report_timer); in gve_close()
1002 gve_free_rings(priv); in gve_close()
1003 gve_free_qpls(priv); in gve_close()
1004 priv->interface_down_cnt++; in gve_close()
1011 if (gve_get_reset_in_progress(priv)) in gve_close()
1014 gve_reset_and_teardown(priv, true); in gve_close()
1015 return gve_reset_recovery(priv, false); in gve_close()
1018 int gve_adjust_queues(struct gve_priv *priv, in gve_adjust_queues() argument
1024 if (netif_carrier_ok(priv->dev)) { in gve_adjust_queues()
1029 err = gve_close(priv->dev); in gve_adjust_queues()
1035 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1036 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1038 err = gve_open(priv->dev); in gve_adjust_queues()
1045 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1046 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1050 netif_err(priv, drv, priv->dev, in gve_adjust_queues()
1052 gve_turndown(priv); in gve_adjust_queues()
1056 static void gve_turndown(struct gve_priv *priv) in gve_turndown() argument
1060 if (netif_carrier_ok(priv->dev)) in gve_turndown()
1061 netif_carrier_off(priv->dev); in gve_turndown()
1063 if (!gve_get_napi_enabled(priv)) in gve_turndown()
1067 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_turndown()
1068 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turndown()
1069 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1073 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turndown()
1074 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turndown()
1075 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1081 netif_tx_disable(priv->dev); in gve_turndown()
1083 gve_clear_napi_enabled(priv); in gve_turndown()
1084 gve_clear_report_stats(priv); in gve_turndown()
1087 static void gve_turnup(struct gve_priv *priv) in gve_turnup() argument
1092 netif_tx_start_all_queues(priv->dev); in gve_turnup()
1095 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_turnup()
1096 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turnup()
1097 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1100 if (gve_is_gqi(priv)) { in gve_turnup()
1101 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1105 gve_write_irq_doorbell_dqo(priv, block, val); in gve_turnup()
1108 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turnup()
1109 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turnup()
1110 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1113 if (gve_is_gqi(priv)) { in gve_turnup()
1114 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1118 gve_write_irq_doorbell_dqo(priv, block, val); in gve_turnup()
1122 gve_set_napi_enabled(priv); in gve_turnup()
1129 struct gve_priv *priv; in gve_tx_timeout() local
1135 priv = netdev_priv(dev); in gve_tx_timeout()
1136 if (txqueue > priv->tx_cfg.num_queues) in gve_tx_timeout()
1139 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); in gve_tx_timeout()
1140 if (ntfy_idx >= priv->num_ntfy_blks) in gve_tx_timeout()
1143 block = &priv->ntfy_blocks[ntfy_idx]; in gve_tx_timeout()
1153 last_nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_timeout()
1156 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_tx_timeout()
1163 gve_schedule_reset(priv); in gve_tx_timeout()
1168 priv->tx_timeo_cnt++; in gve_tx_timeout()
1175 struct gve_priv *priv = netdev_priv(netdev); in gve_set_features() local
1202 netif_err(priv, drv, netdev, in gve_set_features()
1216 static void gve_handle_status(struct gve_priv *priv, u32 status) in gve_handle_status() argument
1219 dev_info(&priv->pdev->dev, "Device requested reset.\n"); in gve_handle_status()
1220 gve_set_do_reset(priv); in gve_handle_status()
1223 priv->stats_report_trigger_cnt++; in gve_handle_status()
1224 gve_set_do_report_stats(priv); in gve_handle_status()
1228 static void gve_handle_reset(struct gve_priv *priv) in gve_handle_reset() argument
1234 if (gve_get_probe_in_progress(priv)) in gve_handle_reset()
1237 if (gve_get_do_reset(priv)) { in gve_handle_reset()
1239 gve_reset(priv, false); in gve_handle_reset()
1244 void gve_handle_report_stats(struct gve_priv *priv) in gve_handle_report_stats() argument
1246 struct stats *stats = priv->stats_report->stats; in gve_handle_report_stats()
1251 if (!gve_get_report_stats(priv)) in gve_handle_report_stats()
1254 be64_add_cpu(&priv->stats_report->written_count, 1); in gve_handle_report_stats()
1256 if (priv->tx) { in gve_handle_report_stats()
1257 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_handle_report_stats()
1262 if (gve_is_gqi(priv)) { in gve_handle_report_stats()
1263 last_completion = priv->tx[idx].done; in gve_handle_report_stats()
1264 tx_frames = priv->tx[idx].req; in gve_handle_report_stats()
1268 start = u64_stats_fetch_begin(&priv->tx[idx].statss); in gve_handle_report_stats()
1269 tx_bytes = priv->tx[idx].bytes_done; in gve_handle_report_stats()
1270 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); in gve_handle_report_stats()
1273 .value = cpu_to_be64(priv->tx[idx].wake_queue), in gve_handle_report_stats()
1278 .value = cpu_to_be64(priv->tx[idx].stop_queue), in gve_handle_report_stats()
1298 .value = cpu_to_be64(priv->tx[idx].queue_timeout), in gve_handle_report_stats()
1304 if (priv->rx) { in gve_handle_report_stats()
1305 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_handle_report_stats()
1308 .value = cpu_to_be64(priv->rx[idx].desc.seqno), in gve_handle_report_stats()
1313 .value = cpu_to_be64(priv->rx[0].fill_cnt), in gve_handle_report_stats()
1320 static void gve_handle_link_status(struct gve_priv *priv, bool link_status) in gve_handle_link_status() argument
1322 if (!gve_get_napi_enabled(priv)) in gve_handle_link_status()
1325 if (link_status == netif_carrier_ok(priv->dev)) in gve_handle_link_status()
1329 netdev_info(priv->dev, "Device link is up.\n"); in gve_handle_link_status()
1330 netif_carrier_on(priv->dev); in gve_handle_link_status()
1332 netdev_info(priv->dev, "Device link is down.\n"); in gve_handle_link_status()
1333 netif_carrier_off(priv->dev); in gve_handle_link_status()
1340 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_service_task() local
1342 u32 status = ioread32be(&priv->reg_bar0->device_status); in gve_service_task()
1344 gve_handle_status(priv, status); in gve_service_task()
1346 gve_handle_reset(priv); in gve_service_task()
1347 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_service_task()
1350 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) in gve_init_priv() argument
1356 err = gve_adminq_alloc(&priv->pdev->dev, priv); in gve_init_priv()
1358 dev_err(&priv->pdev->dev, in gve_init_priv()
1366 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; in gve_init_priv()
1368 err = gve_adminq_describe_device(priv); in gve_init_priv()
1370 dev_err(&priv->pdev->dev, in gve_init_priv()
1374 priv->dev->mtu = priv->dev->max_mtu; in gve_init_priv()
1375 num_ntfy = pci_msix_vec_count(priv->pdev); in gve_init_priv()
1377 dev_err(&priv->pdev->dev, in gve_init_priv()
1382 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n", in gve_init_priv()
1388 priv->num_registered_pages = 0; in gve_init_priv()
1389 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; in gve_init_priv()
1393 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; in gve_init_priv()
1394 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_init_priv()
1396 priv->tx_cfg.max_queues = in gve_init_priv()
1397 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
1398 priv->rx_cfg.max_queues = in gve_init_priv()
1399 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
1401 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_init_priv()
1402 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_init_priv()
1403 if (priv->default_num_queues > 0) { in gve_init_priv()
1404 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
1405 priv->tx_cfg.num_queues); in gve_init_priv()
1406 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
1407 priv->rx_cfg.num_queues); in gve_init_priv()
1410 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", in gve_init_priv()
1411 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); in gve_init_priv()
1412 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", in gve_init_priv()
1413 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); in gve_init_priv()
1416 err = gve_setup_device_resources(priv); in gve_init_priv()
1420 gve_adminq_free(&priv->pdev->dev, priv); in gve_init_priv()
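
For a concrete feel of the sizing logic above: if pci_msix_vec_count() reported, say, 33 vectors, one would be reserved for management, num_ntfy_blks would become (33 - 1) & ~0x1 = 32, and tx_cfg.max_queues / rx_cfg.max_queues would each be capped at 32 / 2 = 16, before being further limited by the device-advertised maxima and default_num_queues. The 33 is a hypothetical figure chosen only to illustrate the arithmetic.
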
1424 static void gve_teardown_priv_resources(struct gve_priv *priv) in gve_teardown_priv_resources() argument
1426 gve_teardown_device_resources(priv); in gve_teardown_priv_resources()
1427 gve_adminq_free(&priv->pdev->dev, priv); in gve_teardown_priv_resources()
1430 static void gve_trigger_reset(struct gve_priv *priv) in gve_trigger_reset() argument
1433 gve_adminq_release(priv); in gve_trigger_reset()
1436 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up) in gve_reset_and_teardown() argument
1438 gve_trigger_reset(priv); in gve_reset_and_teardown()
1441 gve_close(priv->dev); in gve_reset_and_teardown()
1442 gve_teardown_priv_resources(priv); in gve_reset_and_teardown()
1445 static int gve_reset_recovery(struct gve_priv *priv, bool was_up) in gve_reset_recovery() argument
1449 err = gve_init_priv(priv, true); in gve_reset_recovery()
1453 err = gve_open(priv->dev); in gve_reset_recovery()
1459 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n"); in gve_reset_recovery()
1460 gve_turndown(priv); in gve_reset_recovery()
1464 int gve_reset(struct gve_priv *priv, bool attempt_teardown) in gve_reset() argument
1466 bool was_up = netif_carrier_ok(priv->dev); in gve_reset()
1469 dev_info(&priv->pdev->dev, "Performing reset\n"); in gve_reset()
1470 gve_clear_do_reset(priv); in gve_reset()
1471 gve_set_reset_in_progress(priv); in gve_reset()
1476 gve_turndown(priv); in gve_reset()
1477 gve_reset_and_teardown(priv, was_up); in gve_reset()
1481 err = gve_close(priv->dev); in gve_reset()
1484 gve_reset_and_teardown(priv, was_up); in gve_reset()
1487 gve_teardown_priv_resources(priv); in gve_reset()
1491 err = gve_reset_recovery(priv, was_up); in gve_reset()
1492 gve_clear_reset_in_progress(priv); in gve_reset()
1493 priv->reset_cnt++; in gve_reset()
1494 priv->interface_up_cnt = 0; in gve_reset()
1495 priv->interface_down_cnt = 0; in gve_reset()
1496 priv->stats_report_trigger_cnt = 0; in gve_reset()
1523 struct gve_priv *priv; in gve_probe() local
1561 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); in gve_probe()
1590 priv = netdev_priv(dev); in gve_probe()
1591 priv->dev = dev; in gve_probe()
1592 priv->pdev = pdev; in gve_probe()
1593 priv->msg_enable = DEFAULT_MSG_LEVEL; in gve_probe()
1594 priv->reg_bar0 = reg_bar; in gve_probe()
1595 priv->db_bar2 = db_bar; in gve_probe()
1596 priv->service_task_flags = 0x0; in gve_probe()
1597 priv->state_flags = 0x0; in gve_probe()
1598 priv->ethtool_flags = 0x0; in gve_probe()
1600 gve_set_probe_in_progress(priv); in gve_probe()
1601 priv->gve_wq = alloc_ordered_workqueue("gve", 0); in gve_probe()
1602 if (!priv->gve_wq) { in gve_probe()
1607 INIT_WORK(&priv->service_task, gve_service_task); in gve_probe()
1608 INIT_WORK(&priv->stats_report_task, gve_stats_report_task); in gve_probe()
1609 priv->tx_cfg.max_queues = max_tx_queues; in gve_probe()
1610 priv->rx_cfg.max_queues = max_rx_queues; in gve_probe()
1612 err = gve_init_priv(priv, false); in gve_probe()
1621 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); in gve_probe()
1622 gve_clear_probe_in_progress(priv); in gve_probe()
1623 queue_work(priv->gve_wq, &priv->service_task); in gve_probe()
1627 gve_teardown_priv_resources(priv); in gve_probe()
1630 destroy_workqueue(priv->gve_wq); in gve_probe()
1652 struct gve_priv *priv = netdev_priv(netdev); in gve_remove() local
1653 __be32 __iomem *db_bar = priv->db_bar2; in gve_remove()
1654 void __iomem *reg_bar = priv->reg_bar0; in gve_remove()
1657 gve_teardown_priv_resources(priv); in gve_remove()
1658 destroy_workqueue(priv->gve_wq); in gve_remove()