/linux/drivers/net/ethernet/sfc/
tx_common.c:
    35: tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);  in efx_probe_tx_queue()
    38: tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),  in efx_probe_tx_queue()
    43: tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),  in efx_probe_tx_queue()
    55: tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;  in efx_probe_tx_queue()
    101: netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,  in efx_fini_tx_queue()
    108: while (tx_queue->read_count != tx_queue->write_count) {  in efx_fini_tx_queue()
    111: buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];  in efx_fini_tx_queue()
    127: netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,  in efx_remove_tx_queue()
    183: tx_queue->queue, tx_queue->read_count);  in efx_dequeue_buffer()
    206: read_ptr = tx_queue->read_count & tx_queue->ptr_mask;  in efx_dequeue_buffers()
    [all …]
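
The hits in efx_fini_tx_queue() and efx_dequeue_buffers() show the sfc ring-indexing idiom: read_count and write_count are free-running counters, and ptr_mask (ring size minus one, with sizes kept to powers of two) turns a counter into a slot index. A minimal userspace sketch of that idiom, with illustrative names rather than the driver's structures:

```c
#include <assert.h>
#include <stdio.h>

#define RING_ENTRIES 8u                 /* must be a power of two */
#define PTR_MASK     (RING_ENTRIES - 1) /* plays the role of ptr_mask */

struct ring {
	int buffer[RING_ENTRIES];
	unsigned int read_count;  /* free-running; wraps harmlessly */
	unsigned int write_count; /* free-running; wraps harmlessly */
};

static int ring_push(struct ring *r, int v)
{
	/* Full when the counters are exactly RING_ENTRIES apart. */
	if (r->write_count - r->read_count == RING_ENTRIES)
		return -1;
	r->buffer[r->write_count & PTR_MASK] = v;
	r->write_count++;
	return 0;
}

static int ring_pop(struct ring *r, int *v)
{
	/* Empty when the counters match, as in the quoted drain loop
	 * "while (tx_queue->read_count != tx_queue->write_count)". */
	if (r->read_count == r->write_count)
		return -1;
	*v = r->buffer[r->read_count & PTR_MASK];
	r->read_count++;
	return 0;
}

int main(void)
{
	struct ring r = { .read_count = 0, .write_count = 0 };
	int v;

	for (int i = 0; i < 5; i++)
		assert(ring_push(&r, i) == 0);
	while (ring_pop(&r, &v) == 0)
		printf("%d\n", v);
	return 0;
}
```

The counters alone encode occupancy: equal counters mean empty (the drain loop at line 108), and a difference of RING_ENTRIES means full, so every slot in the ring is usable.
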
ef100_tx.c:
    26: return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,  in ef100_tx_probe()
    35: tx_queue->core_txq =  in ef100_tx_init()
    46: tx_queue->tso_version = 3;  in ef100_tx_init()
    98: ++tx_queue->insert_count;  in ef100_tx_can_tso()
    117: if (unlikely(tx_queue->notify_count == tx_queue->write_count))  in ef100_notify_tx_desc()
    120: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in ef100_notify_tx_desc()
    125: tx_queue->notify_count = tx_queue->write_count;  in ef100_notify_tx_desc()
    131: ++tx_queue->pushes;  in ef100_tx_push_buffers()
    340: tx_queue->ptr_mask;  in ef100_ev_tx()
    361: if (!tx_queue->buffer || !tx_queue->ptr_mask) {  in ef100_enqueue_skb()
    [all …]
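
The ef100_notify_tx_desc() hits (lines 117-125) show doorbell coalescing: the MMIO write is skipped entirely when notify_count has already caught up with write_count. A hedged sketch of that pattern, with the register write stubbed out and all names illustrative:

```c
#include <stdio.h>

#define PTR_MASK 511u /* assumed 512-entry ring */

struct txq {
	unsigned int write_count;  /* descriptors filled by software */
	unsigned int notify_count; /* write_count as last told to the NIC */
};

/* Stand-in for the MMIO doorbell write. */
static void write_doorbell(unsigned int write_ptr)
{
	printf("doorbell <- slot %u\n", write_ptr);
}

static void notify_tx_desc(struct txq *q)
{
	unsigned int write_ptr;

	/* Nothing new since the last doorbell: skip the expensive write. */
	if (q->notify_count == q->write_count)
		return;

	write_ptr = q->write_count & PTR_MASK;
	write_doorbell(write_ptr);
	q->notify_count = q->write_count;
}

int main(void)
{
	struct txq q = { .write_count = 0, .notify_count = 0 };

	q.write_count += 3;
	notify_tx_desc(&q); /* rings the doorbell for slot 3 */
	notify_tx_desc(&q); /* coalesced away: nothing new to report */
	return 0;
}
```

Tracking the last-notified count means back-to-back calls cost one branch instead of one PCIe write.
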
tx.c:
    122: ++tx_queue->insert_count;  in efx_enqueue_skb_copy()
    280: if (!tx_queue->piobuf)  in efx_tx_may_pio()
    286: if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))  in efx_tx_may_pio()
    374: tx_queue->cb_packets++;  in __efx_enqueue_skb()
    391: tx_queue->tso_bursts++;  in __efx_enqueue_skb()
    395: tx_queue->tx_packets++;  in __efx_enqueue_skb()
    462: tx_queue->read_count - tx_queue->insert_count;  in efx_xdp_tx_buffers()
    561: read_ptr = tx_queue->read_count & tx_queue->ptr_mask;  in efx_xmit_done_single()
    571: tx_queue->queue);  in efx_xmit_done_single()
    582: read_ptr = tx_queue->read_count & tx_queue->ptr_mask;  in efx_xmit_done_single()
    [all …]
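
The efx_xmit_done_single() hits show the completion side: read_ptr is recomputed from read_count and ptr_mask as buffers are retired. A sketch of such a completion walk up to a NIC-reported index, under the same power-of-two assumptions as above (release_buffer() is a stand-in for DMA unmap and skb free):

```c
#include <stdio.h>

#define RING_ENTRIES 8u
#define PTR_MASK     (RING_ENTRIES - 1)

struct buf { int in_use; };

struct txq {
	struct buf buffer[RING_ENTRIES];
	unsigned int read_count;
};

/* Stand-in for unmapping DMA and freeing the skb. */
static void release_buffer(struct buf *b)
{
	b->in_use = 0;
}

/* Free every buffer from the current read position up to (and
 * including) the index the NIC's completion event reported. */
static void xmit_done(struct txq *q, unsigned int stop_index)
{
	unsigned int read_ptr = q->read_count & PTR_MASK;

	while (read_ptr != ((stop_index + 1) & PTR_MASK)) {
		release_buffer(&q->buffer[read_ptr]);
		q->read_count++;
		read_ptr = q->read_count & PTR_MASK;
	}
}

int main(void)
{
	struct txq q = { .read_count = 0 };

	for (unsigned int i = 0; i < 5; i++)
		q.buffer[i].in_use = 1;
	xmit_done(&q, 4); /* completes slots 0..4 */
	printf("read_count = %u\n", q.read_count); /* prints 5 */
	return 0;
}
```
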
nic_common.h:
    60: efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)  in efx_tx_desc() argument
    62: return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;  in efx_tx_desc()
    92: bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);  in efx_nic_may_push_tx_desc()
    94: tx_queue->empty_read_count = 0;  in efx_nic_may_push_tx_desc()
    95: return was_empty && tx_queue->write_count - write_count == 1;  in efx_nic_may_push_tx_desc()
    121: static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)  in efx_nic_probe_tx() argument
    123: return tx_queue->efx->type->tx_probe(tx_queue);  in efx_nic_probe_tx()
    127: tx_queue->efx->type->tx_init(tx_queue);  in efx_nic_init_tx()
    131: if (tx_queue->efx->type->tx_remove)  in efx_nic_remove_tx()
    132: tx_queue->efx->type->tx_remove(tx_queue);  in efx_nic_remove_tx()
    [all …]
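
efx_nic_probe_tx() and friends dispatch through the per-generation efx->type operations table, and efx_nic_remove_tx() guards the optional hook with a NULL check. A compact sketch of that dispatch shape, with demo implementations standing in for real NIC code:

```c
#include <stdio.h>

struct txq; /* forward declaration */

/* Per-NIC-generation operations table, mirroring the efx->type hooks. */
struct nic_type {
	int  (*tx_probe)(struct txq *q);
	void (*tx_init)(struct txq *q);
	void (*tx_remove)(struct txq *q); /* optional: may be NULL */
};

struct txq {
	const struct nic_type *type;
};

/* Thin wrappers like efx_nic_probe_tx()/efx_nic_init_tx(): dispatch
 * through the table, guarding the optional hook. */
static int  nic_probe_tx(struct txq *q) { return q->type->tx_probe(q); }
static void nic_init_tx(struct txq *q)  { q->type->tx_init(q); }
static void nic_remove_tx(struct txq *q)
{
	if (q->type->tx_remove)
		q->type->tx_remove(q);
}

static int  demo_probe(struct txq *q) { (void)q; puts("probe"); return 0; }
static void demo_init(struct txq *q)  { (void)q; puts("init"); }

static const struct nic_type demo_type = {
	.tx_probe = demo_probe,
	.tx_init  = demo_init,
	/* .tx_remove left NULL: the wrapper tolerates it */
};

int main(void)
{
	struct txq q = { .type = &demo_type };

	if (nic_probe_tx(&q) == 0)
		nic_init_tx(&q);
	nic_remove_tx(&q);
	return 0;
}
```
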
tx_tso.c:
    113: ++tx_queue->insert_count;  in efx_tx_queue_insert()
    116: tx_queue->read_count >=  in efx_tx_queue_insert()
    117: tx_queue->efx->txq_entries);  in efx_tx_queue_insert()
    121: dma_len = tx_queue->efx->type->tx_limit_len(tx_queue,  in efx_tx_queue_insert()
    170: struct efx_tx_queue *tx_queue,  in tso_start() argument
    319: ++tx_queue->insert_count;  in tso_start_new_packet()
    339: ++tx_queue->insert_count;  in tso_start_new_packet()
    366: struct efx_nic *efx = tx_queue->efx;  in efx_enqueue_skb_tso()
    370: if (tx_queue->tso_version != 1)  in efx_enqueue_skb_tso()
    378: EFX_WARN_ON_ONCE_PARANOID(tx_queue->write_count != tx_queue->insert_count);  in efx_enqueue_skb_tso()
    [all …]
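
efx_tx_queue_insert() (line 121) caps each descriptor's length with the NIC-specific tx_limit_len hook, so one DMA-mapped span may become several descriptors. A sketch of that splitting loop, assuming a fixed 4096-byte per-descriptor limit purely for illustration:

```c
#include <stdio.h>

#define DESC_LIMIT 4096u /* assumed per-descriptor byte limit */

/* Chop a DMA span into descriptors no longer than the limit, the way
 * the insert loop does with efx->type->tx_limit_len(). */
static void tx_queue_insert(unsigned long long dma_addr, unsigned int len)
{
	while (len) {
		unsigned int dma_len = len < DESC_LIMIT ? len : DESC_LIMIT;

		printf("desc: addr=0x%llx len=%u\n", dma_addr, dma_len);
		dma_addr += dma_len;
		len -= dma_len;
	}
}

int main(void)
{
	tx_queue_insert(0x1000, 10000); /* emits 4096 + 4096 + 1808 */
	return 0;
}
```
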
tx_common.h:
    14: int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
    15: void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
    16: void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
    17: void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
    19: void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
    29: void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
    30: void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
    32: void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
    35: struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
    38: int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
    [all …]
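
These declarations imply a fixed queue lifecycle: probe allocates, init readies the hardware, fini drains, remove frees. A stub sketch of the calling order (the bodies are placeholders, not the driver's logic):

```c
#include <stdio.h>

static int  probe_tx_queue(void)  { puts("probe: allocate buffers"); return 0; }
static void init_tx_queue(void)   { puts("init: reset counters, tell HW"); }
static void fini_tx_queue(void)   { puts("fini: drain pending buffers"); }
static void remove_tx_queue(void) { puts("remove: free buffers"); }

int main(void)
{
	if (probe_tx_queue())
		return 1;
	init_tx_queue();
	/* ... enqueue/complete traffic ... */
	fini_tx_queue();
	remove_tx_queue();
	return 0;
}
```
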
efx_channels.c:
    545: tx_queue = &channel->tx_queue[j];  in efx_alloc_channel()
    546: tx_queue->efx = efx;  in efx_alloc_channel()
    547: tx_queue->queue = -1;  in efx_alloc_channel()
    548: tx_queue->label = j;  in efx_alloc_channel()
    619: tx_queue = &channel->tx_queue[j];  in efx_copy_channel()
    620: if (tx_queue->channel)  in efx_copy_channel()
    622: tx_queue->buffer = NULL;  in efx_copy_channel()
    624: memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));  in efx_copy_channel()
    883: tx_queue->channel->channel, tx_queue->label,  in efx_set_xdp_tx_queue()
    936: tx_queue->queue);  in efx_set_channels()
    [all …]
mcdi_functions.c:
    170: struct efx_channel *channel = tx_queue->channel;  in efx_mcdi_tx_init()
    171: struct efx_nic *efx = tx_queue->efx;  in efx_mcdi_tx_init()
    185: dma_addr = tx_queue->txd.buf.dma_addr;  in efx_mcdi_tx_init()
    188: tx_queue->queue, entries, (u64)dma_addr);  in efx_mcdi_tx_init()
    198: bool tso_v2 = tx_queue->tso_version == 2;  in efx_mcdi_tx_init()
    222: tx_queue->tso_version = 0;  in efx_mcdi_tx_init()
    243: efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);  in efx_mcdi_tx_remove()
    250: struct efx_nic *efx = tx_queue->efx;  in efx_mcdi_tx_fini()
    255: tx_queue->queue);  in efx_mcdi_tx_fini()
    357: struct efx_tx_queue *tx_queue;  in efx_fini_dmaq() local
    [all …]
farch.c:
    287: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_farch_notify_tx_desc()
    303: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_farch_push_tx_desc()
    324: if (unlikely(tx_queue->write_count == tx_queue->insert_count))  in efx_farch_tx_write()
    328: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_farch_tx_write()
    343: } while (tx_queue->write_count != tx_queue->insert_count);  in efx_farch_tx_write()
    351: ++tx_queue->pushes;  in efx_farch_tx_write()
    410: tx_queue->queue);  in efx_farch_tx_init()
    453: efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);  in efx_farch_tx_remove()
    838: tx_queue = channel->tx_queue +  in efx_farch_handle_tx_event()
    844: tx_queue = channel->tx_queue +  in efx_farch_handle_tx_event()
    [all …]
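
efx_farch_tx_write() (lines 324-351) is the classic write-out loop: bail if nothing is pending, then advance write_count toward insert_count one hardware descriptor at a time and notify the NIC once at the end (counted in tx_queue->pushes). A sketch of that shape with the hardware accesses stubbed:

```c
#include <stdio.h>

#define PTR_MASK 511u /* assumed 512-entry ring */

struct txq {
	unsigned int insert_count; /* buffers filled by the enqueue path */
	unsigned int write_count;  /* descriptors handed to the NIC */
	unsigned int pushes;
};

/* Stand-ins for building a hardware descriptor and ringing the bell. */
static void fill_hw_desc(unsigned int write_ptr)
{
	printf("descriptor at slot %u\n", write_ptr);
}

static void notify_hw(struct txq *q)
{
	++q->pushes;
}

static void tx_write(struct txq *q)
{
	unsigned int write_ptr;

	if (q->write_count == q->insert_count)
		return; /* nothing new to write */

	do {
		write_ptr = q->write_count & PTR_MASK;
		fill_hw_desc(write_ptr);
		++q->write_count;
	} while (q->write_count != q->insert_count);

	notify_hw(q); /* one doorbell for the whole batch */
}

int main(void)
{
	struct txq q = { .insert_count = 3, .write_count = 0, .pushes = 0 };

	tx_write(&q);
	printf("pushes = %u\n", q.pushes); /* prints 1 */
	return 0;
}
```

Batching the doorbell is the point: three descriptors cost one notification, not three.
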
net_driver.h:
    1364: int (*tx_probe)(struct efx_tx_queue *tx_queue);
    1365: void (*tx_init)(struct efx_tx_queue *tx_queue);
    1566: for (_tx_queue = (_channel)->tx_queue; \
    1567: _tx_queue < (_channel)->tx_queue + \
    1655: struct efx_tx_queue *tx_queue;  in efx_channel_tx_fill_level() local
    1660: tx_queue->insert_count - tx_queue->read_count);  in efx_channel_tx_fill_level()
    1669: struct efx_tx_queue *tx_queue;  in efx_channel_tx_old_fill_level() local
    1674: tx_queue->insert_count - tx_queue->old_read_count);  in efx_channel_tx_old_fill_level()
    1695: return tx_queue->insert_count & tx_queue->ptr_mask;  in efx_tx_queue_get_insert_index()
    1702: return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];  in __efx_tx_queue_get_insert_buffer()
    [all …]
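
efx_channel_tx_fill_level() (lines 1655-1660) reduces to unsigned counter subtraction, which stays correct across wraparound, maximized over the channel's queues. A sketch of that arithmetic:

```c
#include <stdio.h>

struct txq { unsigned int insert_count, read_count; };

/* Ring occupancy is the counter difference; the channel-level figure
 * is the maximum across its queues. */
static unsigned int channel_tx_fill_level(const struct txq *queues, int n)
{
	unsigned int fill = 0;

	for (int i = 0; i < n; i++) {
		unsigned int level = queues[i].insert_count - queues[i].read_count;

		if (level > fill)
			fill = level;
	}
	return fill;
}

int main(void)
{
	struct txq queues[2] = { { 10, 4 }, { 7, 5 } };

	printf("%u\n", channel_tx_fill_level(queues, 2)); /* prints 6 */
	return 0;
}
```
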
selftest.c:
    410: struct efx_nic *efx = tx_queue->efx;  in efx_begin_loopback()
    438: rc = efx_enqueue_skb(tx_queue, skb);  in efx_begin_loopback()
    467: struct efx_nic *efx = tx_queue->efx;  in efx_end_loopback()
    515: lb_tests->tx_done[tx_queue->label] += tx_done;  in efx_end_loopback()
    523: efx_test_loopback(struct efx_tx_queue *tx_queue,  in efx_test_loopback() argument
    526: struct efx_nic *efx = tx_queue->efx;  in efx_test_loopback()
    542: tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),  in efx_test_loopback()
    546: begin_rc = efx_begin_loopback(tx_queue);  in efx_test_loopback()
    618: struct efx_tx_queue *tx_queue;  in efx_test_loopbacks() local
    659: state->offload_csum = (tx_queue->type &  in efx_test_loopbacks()
    [all …]
ef10.c:
    2190: tx_queue->type = tx_queue->label & 3;  in efx_ef10_tx_probe()
    2191: return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,  in efx_ef10_tx_probe()
    2204: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_ef10_push_tx_desc()
    2351: !tx_queue->timestamping && !tx_queue->xdp_tx) {  in efx_ef10_tx_init()
    2403: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_ef10_notify_tx_desc()
    2436: if (unlikely(tx_queue->write_count == tx_queue->insert_count))  in efx_ef10_tx_write()
    2440: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_ef10_tx_write()
    2450: tx_queue->packet_write_count = tx_queue->write_count;  in efx_ef10_tx_write()
    2452: tx_queue->packet_write_count = tx_queue->write_count;  in efx_ef10_tx_write()
    2461: } while (tx_queue->write_count != tx_queue->insert_count);  in efx_ef10_tx_write()
    [all …]
ef100_tx.h:
    17: int ef100_tx_probe(struct efx_tx_queue *tx_queue);
    18: void ef100_tx_init(struct efx_tx_queue *tx_queue);
    19: void ef100_tx_write(struct efx_tx_queue *tx_queue);
    24: netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
efx.h:
    21: void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
    24: netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
    25: static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)  in efx_enqueue_skb() argument
    27: return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,  in efx_enqueue_skb()
    29: tx_queue, skb);  in efx_enqueue_skb()
    31: void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
    32: void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
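
efx_enqueue_skb() (line 27) uses the kernel's INDIRECT_CALL_2() so the common tx_enqueue implementations are reached by direct calls, keeping retpoline-era indirect-branch cost off the hot path. A simplified userspace model of the idea — not the kernel macro itself:

```c
#include <stdio.h>

static int enqueue_a(int x) { return x + 1; }
static int enqueue_b(int x) { return x + 2; }

/* Compare the function pointer against known candidates and call them
 * directly on a match; fall back to the indirect call otherwise. */
#define INDIRECT_CALL_2_MODEL(f, f2, f1, ...)			\
	((f) == (f2) ? (f2)(__VA_ARGS__) :			\
	 (f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

int main(void)
{
	int (*enqueue)(int) = enqueue_b;

	/* The direct call to enqueue_b is taken; any third
	 * implementation would go through the pointer. */
	printf("%d\n", INDIRECT_CALL_2_MODEL(enqueue, enqueue_b, enqueue_a, 40));
	return 0;
}
```
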
/linux/drivers/net/ethernet/sfc/falcon/
tx.c:
    72: netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,  in ef4_dequeue_buffer()
    74: tx_queue->queue, tx_queue->read_count);  in ef4_dequeue_buffer()
    273: while (tx_queue->insert_count != tx_queue->write_count) {  in ef4_enqueue_unwind()
    363: read_ptr = tx_queue->read_count & tx_queue->ptr_mask;  in ef4_dequeue_buffers()
    380: read_ptr = tx_queue->read_count & tx_queue->ptr_mask;  in ef4_dequeue_buffers()
    525: if (tx_queue->read_count == tx_queue->old_write_count) {  in ef4_xmit_done()
    551: tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);  in ef4_probe_tx_queue()
    610: netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,  in ef4_fini_tx_queue()
    617: while (tx_queue->read_count != tx_queue->write_count) {  in ef4_fini_tx_queue()
    619: buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];  in ef4_fini_tx_queue()
    [all …]
nic.h:
    71: if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)  in ef4_tx_queue_partner()
    72: return tx_queue - EF4_TXQ_TYPE_OFFLOAD;  in ef4_tx_queue_partner()
    74: return tx_queue + EF4_TXQ_TYPE_OFFLOAD;  in ef4_tx_queue_partner()
    104: tx_queue->empty_read_count = 0;  in ef4_nic_may_push_tx_desc()
    317: return tx_queue->efx->type->tx_probe(tx_queue);  in ef4_nic_probe_tx()
    321: tx_queue->efx->type->tx_init(tx_queue);  in ef4_nic_init_tx()
    325: tx_queue->efx->type->tx_remove(tx_queue);  in ef4_nic_remove_tx()
    329: tx_queue->efx->type->tx_write(tx_queue);  in ef4_nic_push_buffers()
    383: int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue);
    384: void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue);
    [all …]
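
ef4_tx_queue_partner() (lines 71-74) finds a queue's checksum-offload partner by pointer arithmetic: paired queues sit EF4_TXQ_TYPE_OFFLOAD elements apart in the channel's array, and a bit in the queue number says which half of the pair you are in. A sketch assuming a flag/stride value of 1 (illustrative):

```c
#include <stdio.h>

#define TXQ_TYPE_OFFLOAD 1 /* assumed flag doubling as array stride */

struct txq { unsigned int queue; };

/* If the offload bit is set we are the second of the pair, so step
 * back; otherwise step forward. */
static struct txq *tx_queue_partner(struct txq *q)
{
	if (q->queue & TXQ_TYPE_OFFLOAD)
		return q - TXQ_TYPE_OFFLOAD;
	else
		return q + TXQ_TYPE_OFFLOAD;
}

int main(void)
{
	struct txq queues[2] = { { .queue = 0 }, { .queue = 1 } };

	printf("partner of q0 is q%u\n", tx_queue_partner(&queues[0])->queue);
	printf("partner of q1 is q%u\n", tx_queue_partner(&queues[1])->queue);
	return 0;
}
```

The trick only works because the array layout and the numbering flag are kept in lockstep; the pointer math is the flag.
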
farch.c:
    277: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in ef4_farch_notify_tx_desc()
    293: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in ef4_farch_push_tx_desc()
    314: if (unlikely(tx_queue->write_count == tx_queue->insert_count))  in ef4_farch_tx_write()
    318: write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in ef4_farch_tx_write()
    333: } while (tx_queue->write_count != tx_queue->insert_count);  in ef4_farch_tx_write()
    341: ++tx_queue->pushes;  in ef4_farch_tx_write()
    404: tx_queue->queue);  in ef4_farch_tx_init()
    425: tx_queue->queue);  in ef4_farch_tx_init()
    451: tx_queue->queue);  in ef4_farch_tx_fini()
    460: ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);  in ef4_farch_tx_remove()
    [all …]
net_driver.h:
    445: struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES];  member
    1082: int (*tx_probe)(struct ef4_tx_queue *tx_queue);
    1083: void (*tx_init)(struct ef4_tx_queue *tx_queue);
    1206: return &channel->tx_queue[type];  in ef4_channel_get_tx_queue()
    1211: return !(tx_queue->efx->net_dev->num_tc < 2 &&  in ef4_tx_queue_used()
    1212: tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI);  in ef4_tx_queue_used()
    1220: for (_tx_queue = (_channel)->tx_queue; \
    1230: for (_tx_queue = (_channel)->tx_queue; \
    1312: return tx_queue->insert_count & tx_queue->ptr_mask;  in ef4_tx_queue_get_insert_index()
    1319: return &tx_queue->buffer[ef4_tx_queue_get_insert_index(tx_queue)];  in __ef4_tx_queue_get_insert_buffer()
    [all …]
selftest.c:
    412: struct ef4_nic *efx = tx_queue->efx;  in ef4_begin_loopback()
    440: rc = ef4_enqueue_skb(tx_queue, skb);  in ef4_begin_loopback()
    469: struct ef4_nic *efx = tx_queue->efx;  in ef4_end_loopback()
    517: lb_tests->tx_done[tx_queue->queue] += tx_done;  in ef4_end_loopback()
    525: ef4_test_loopback(struct ef4_tx_queue *tx_queue,  in ef4_test_loopback() argument
    528: struct ef4_nic *efx = tx_queue->efx;  in ef4_test_loopback()
    544: tx_queue->queue, LOOPBACK_MODE(efx),  in ef4_test_loopback()
    548: begin_rc = ef4_begin_loopback(tx_queue);  in ef4_test_loopback()
    620: struct ef4_tx_queue *tx_queue;  in ef4_test_loopbacks() local
    661: state->offload_csum = (tx_queue->queue &  in ef4_test_loopbacks()
    [all …]
efx.h:
    23: int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
    24: void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
    25: void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
    26: void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
    27: void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
    30: netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
    31: void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
efx.c:
    247: tx_queue->pkts_compl = 0;  in ef4_process_channel()
    248: tx_queue->bytes_compl = 0;  in ef4_process_channel()
    262: if (tx_queue->bytes_compl) {  in ef4_process_channel()
    264: tx_queue->pkts_compl, tx_queue->bytes_compl);  in ef4_process_channel()
    441: tx_queue = &channel->tx_queue[j];  in ef4_alloc_channel()
    442: tx_queue->efx = efx;  in ef4_alloc_channel()
    478: tx_queue = &channel->tx_queue[j];  in ef4_copy_channel()
    479: if (tx_queue->channel)  in ef4_copy_channel()
    481: tx_queue->buffer = NULL;  in ef4_copy_channel()
    482: memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));  in ef4_copy_channel()
    [all …]
/linux/drivers/net/ethernet/freescale/
gianfar.c:
    562: grp->tx_queue = priv->tx_queue[i];  in gfar_parse_group()
    1134: tx_queue = priv->tx_queue[i];  in free_skb_resources()
    1293: tx_queue = priv->tx_queue[i];  in gfar_init_bds()
    1295: tx_queue->num_txbdfree = tx_queue->tx_ring_size;  in gfar_init_bds()
    1296: tx_queue->dirty_tx = tx_queue->tx_bd_base;  in gfar_init_bds()
    1297: tx_queue->cur_tx = tx_queue->tx_bd_base;  in gfar_init_bds()
    1362: tx_queue = priv->tx_queue[i];  in gfar_alloc_skb_resources()
    1384: tx_queue = priv->tx_queue[i];  in gfar_alloc_skb_resources()
    1785: tx_queue = priv->tx_queue[rq];  in gfar_start_xmit()
    1955: tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;  in gfar_start_xmit()
    [all …]
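
The gfar_init_bds() hits (lines 1295-1297) reset gianfar's descriptor ring to its empty state: producer (cur_tx) and consumer (dirty_tx) both at the base, with num_txbdfree covering the whole ring. A sketch of that invariant plus the wrapping pointer advance it implies; the structure is illustrative, not the driver's:

```c
#include <stdio.h>

#define TX_RING_SIZE 8

struct txbd { unsigned int status; };

struct gfar_txq {
	struct txbd tx_bd_base[TX_RING_SIZE];
	struct txbd *cur_tx;   /* producer: next descriptor to fill */
	struct txbd *dirty_tx; /* consumer: next descriptor to reclaim */
	int num_txbdfree;      /* free-descriptor budget */
};

/* Empty-ring reset as seen in the quoted lines. */
static void init_bds(struct gfar_txq *q)
{
	q->num_txbdfree = TX_RING_SIZE;
	q->dirty_tx = q->tx_bd_base;
	q->cur_tx = q->tx_bd_base;
}

/* Advance a ring pointer, wrapping at the end of the array. */
static struct txbd *next_bd(struct gfar_txq *q, struct txbd *bd)
{
	return (bd + 1 == q->tx_bd_base + TX_RING_SIZE) ? q->tx_bd_base : bd + 1;
}

int main(void)
{
	struct gfar_txq q;

	init_bds(&q);
	q.cur_tx = next_bd(&q, q.cur_tx); /* enqueue one descriptor... */
	q.num_txbdfree--;                 /* ...and spend one unit of budget */
	printf("free: %d\n", q.num_txbdfree); /* prints 7 */
	return 0;
}
```

Unlike the sfc counters, this scheme tracks occupancy with an explicit budget and bare ring pointers, so the wraparound has to be handled by hand.
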
/linux/drivers/net/wireless/rsi/
rsi_91x_core.c:
    36: q_len = skb_queue_len(&common->tx_queue[ii]);  in rsi_determine_min_weight_queue()
    60: q_len = skb_queue_len(&common->tx_queue[ii]);  in rsi_recalculate_weights()
    106: if (skb_queue_len(&common->tx_queue[q_num]))  in rsi_get_num_pkts_dequeue()
    107: skb = skb_peek(&common->tx_queue[q_num]);  in rsi_get_num_pkts_dequeue()
    149: if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {  in rsi_core_determine_hal_queue()
    172: q_len = skb_queue_len(&common->tx_queue[ii]);  in rsi_core_determine_hal_queue()
    187: q_len = skb_queue_len(&common->tx_queue[q_num]);  in rsi_core_determine_hal_queue()
    200: q_len = skb_queue_len(&common->tx_queue[q_num]);  in rsi_core_determine_hal_queue()
    229: skb_queue_tail(&common->tx_queue[q_num], skb);  in rsi_core_queue_pkt()
    249: return skb_dequeue(&common->tx_queue[q_num]);  in rsi_core_dequeue_pkt()
    [all …]
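
rsi_determine_min_weight_queue() and rsi_core_determine_hal_queue() repeatedly take skb_queue_len() on each software queue to decide what to dequeue next, with management traffic (MGMT_SOFT_Q) checked first in the real driver. A sketch of a minimum-weight scan over non-empty queues, with made-up data:

```c
#include <stdio.h>

#define NUM_QUEUES 4

struct swq { int len; int weight; };

/* Pick the non-empty queue with the lowest weight; -1 if all empty. */
static int min_weight_queue(const struct swq *q)
{
	int best = -1;

	for (int i = 0; i < NUM_QUEUES; i++) {
		if (!q[i].len)
			continue; /* nothing pending on this queue */
		if (best < 0 || q[i].weight < q[best].weight)
			best = i;
	}
	return best;
}

int main(void)
{
	struct swq q[NUM_QUEUES] = { { 0, 1 }, { 3, 8 }, { 2, 4 }, { 5, 6 } };

	/* Queue 0 is empty despite its low weight, so queue 2 wins. */
	printf("dequeue from queue %d\n", min_weight_queue(q));
	return 0;
}
```
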
/linux/drivers/staging/wfx/
queue.c:
    71: skb_queue_head_init(&wvif->tx_queue[i].normal);  in wfx_tx_queues_init()
    72: skb_queue_head_init(&wvif->tx_queue[i].cab);  in wfx_tx_queues_init()
    73: wvif->tx_queue[i].priority = priorities[i];  in wfx_tx_queues_init()
    88: WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));  in wfx_tx_queues_check_empty()
    89: WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));  in wfx_tx_queues_check_empty()
    117: struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];  in wfx_tx_queues_put()
    139: queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];  in wfx_pending_drop()
    165: queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];  in wfx_pending_get()
    224: if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))  in wfx_tx_queues_has_cab()
    247: queues[num_queues] = &wvif->tx_queue[i];  in wfx_tx_queues_get_skb()
    [all …]
/linux/drivers/net/wireless/ath/ath5k/
dma.c:
    132: u32 tx_queue;  in ath5k_hw_start_tx_dma() local
    141: tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);  in ath5k_hw_start_tx_dma()
    148: tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;  in ath5k_hw_start_tx_dma()
    151: tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;  in ath5k_hw_start_tx_dma()
    156: tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;  in ath5k_hw_start_tx_dma()
    164: ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);  in ath5k_hw_start_tx_dma()
    191: u32 tx_queue, pending;  in ath5k_hw_stop_tx_dma() local
    200: tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);  in ath5k_hw_stop_tx_dma()
    207: tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;  in ath5k_hw_stop_tx_dma()
    212: tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;  in ath5k_hw_stop_tx_dma()
    [all …]
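
Both functions follow the MMIO read-modify-write pattern: read AR5K_CR, OR in the queue's enable (TXE) or disable (TXD) bit, write it back. The X & ~Y form keeps the ORed-in bits disjoint from the opposing mask; note that line 212, as quoted, masks AR5K_CR_TXD1 with its own complement, which is always zero, so that particular OR adds nothing. A sketch of the pattern with a plain variable standing in for the register and illustrative bit values:

```c
#include <stdint.h>
#include <stdio.h>

#define CR_TXE0 0x00000001u /* enable TX queue 0 (illustrative value) */
#define CR_TXD0 0x00000004u /* disable TX queue 0 (illustrative value) */

static uint32_t cr_reg; /* stands in for the AR5K_CR MMIO register */

static uint32_t reg_read(void)    { return cr_reg; }
static void reg_write(uint32_t v) { cr_reg = v; }

/* Read-modify-write: set the enable bit while staying clear of the
 * disable mask, then write the register back in one go. */
static void start_tx_dma_q0(void)
{
	uint32_t tx_queue = reg_read();

	tx_queue |= CR_TXE0 & ~CR_TXD0;
	reg_write(tx_queue);
}

int main(void)
{
	start_tx_dma_q0();
	printf("CR = 0x%08x\n", cr_reg); /* prints 0x00000001 */
	return 0;
}
```
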