/linux/drivers/net/ethernet/sun/

sungem.h
    883: #define TX_RING_SIZE 128    (macro definition)
    886: #if TX_RING_SIZE == 32
    888: #elif TX_RING_SIZE == 64
    890: #elif TX_RING_SIZE == 128
    892: #elif TX_RING_SIZE == 256
    894: #elif TX_RING_SIZE == 512
    896: #elif TX_RING_SIZE == 1024
    898: #elif TX_RING_SIZE == 2048
    900: #elif TX_RING_SIZE == 4096
    902: #elif TX_RING_SIZE == 8192
    [all …]

sunhme.h
    331: #define TX_RING_SIZE 32 /* Must be >16 and <255, multiple of 16 */    (macro definition)
    334: #if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
    335: #error TX_RING_SIZE holds illegal value
    361: #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
    363: #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
    367: (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
    418: struct sk_buff *tx_skbs[TX_RING_SIZE];

sunbmac.h
    251: #define TX_RING_SIZE 256    (macro definition)
    255: #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
    257: #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
    261: (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
    300: struct sk_buff *tx_skbs[TX_RING_SIZE];

sunqe.h
    291: #define TX_RING_SIZE 16    (macro definition)
    301: (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
    326: u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
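The NEXT_TX/PREV_TX macros and the tx_old/tx_new arithmetic shown in sunhme.h and sunbmac.h are the classic power-of-two ring-index idiom. The sketch below reproduces that arithmetic in stand-alone form; the second branch of the buffers-available conditional (only the first branch appears in the excerpt above) and the compile-time check are assumptions for illustration, not quotes from these headers.

    #include <assert.h>
    #include <stdio.h>

    /* The mask trick only works when the ring size is a power of two;
     * drivers commonly enforce such constraints with #if/#error, as
     * sunhme.h does (lines 334-335 above) for its own rules. */
    #define TX_RING_SIZE 32
    #if (TX_RING_SIZE & (TX_RING_SIZE - 1)) != 0
    #error TX_RING_SIZE must be a power of two
    #endif

    /* Advance/retreat an index with wrap-around, as in sunhme.h/sunbmac.h. */
    #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
    #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))

    /* Free descriptors between consumer (tx_old) and producer (tx_new).
     * One slot stays unused so "full" and "empty" remain distinguishable. */
    static unsigned int tx_buffs_avail(unsigned int tx_old, unsigned int tx_new)
    {
            return tx_old <= tx_new ?
                    tx_old + (TX_RING_SIZE - 1) - tx_new :
                    tx_old - tx_new - 1;
    }

    int main(void)
    {
            assert(NEXT_TX(TX_RING_SIZE - 1) == 0);            /* wraps to slot 0 */
            assert(PREV_TX(0) == TX_RING_SIZE - 1);            /* wraps to the end */
            assert(tx_buffs_avail(0, 0) == TX_RING_SIZE - 1);  /* empty ring */
            assert(tx_buffs_avail(5, 4) == 0);                 /* completely full */
            printf("ring arithmetic checks passed\n");
            return 0;
    }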
/linux/drivers/net/ethernet/amd/

ariadne.c
    80: #define TX_RING_SIZE 5    (macro definition)
    90: volatile u_short *tx_buff[TX_RING_SIZE];
    100: struct TDRE tx_ring[TX_RING_SIZE];
    129: for (i = 0; i < TX_RING_SIZE; i++) {    (in ariadne_init_ring())
    309: int entry = dirty_tx % TX_RING_SIZE;    (in ariadne_interrupt())
    349: dirty_tx += TX_RING_SIZE;    (in ariadne_interrupt())
    576: entry = priv->cur_tx % TX_RING_SIZE;    (in ariadne_start_xmit())
    597: if ((priv->cur_tx >= TX_RING_SIZE) &&    (in ariadne_start_xmit())
    598: (priv->dirty_tx >= TX_RING_SIZE)) {    (in ariadne_start_xmit())
    603: priv->cur_tx -= TX_RING_SIZE;    (in ariadne_start_xmit())
    [all …]

7990.h
    39: #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)    (macro definition)
    41: #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
    87: volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
    90: volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];

atarilance.c
    111: #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)    (macro definition)
    113: #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
    157: struct lance_tx_head tx_head[TX_RING_SIZE];
    701: for( i = 0; i < TX_RING_SIZE; i++ ) {    (in lance_init_ring())
    752: for( i = 0 ; i < TX_RING_SIZE; i++ )    (in lance_tx_timeout())
    826: while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {    (in lance_start_xmit())
    827: lp->cur_tx -= TX_RING_SIZE;    (in lance_start_xmit())
    828: lp->dirty_tx -= TX_RING_SIZE;    (in lance_start_xmit())
    917: if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {    (in lance_interrupt())
    921: dirty_tx += TX_RING_SIZE;    (in lance_interrupt())
    [all …]

lance.c
    193: #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))    (macro definition)
    194: #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
    238: struct lance_tx_head tx_ring[TX_RING_SIZE];
    242: struct sk_buff* tx_skbuff[TX_RING_SIZE];
    852: for (i = 0; i < TX_RING_SIZE; i++) {    (in lance_purge_ring())
    889: for (i = 0; i < TX_RING_SIZE; i++) {    (in lance_init_ring())
    938: for (i = 0; i < TX_RING_SIZE; i++)    (in lance_tx_timeout())
    1013: if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)    (in lance_start_xmit())
    1096: if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {    (in lance_interrupt())
    1100: dirty_tx += TX_RING_SIZE;    (in lance_interrupt())
    [all …]

sun3lance.c
    94: #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)    (macro definition)
    96: #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
    142: struct lance_tx_head tx_head[TX_RING_SIZE];
    145: char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
    459: for( i = 0; i < TX_RING_SIZE; i++ ) {    (in lance_init_ring())
    547: for( i = 0 ; i < TX_RING_SIZE; i++ )    (in lance_start_xmit())

declance.c
    156: #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))    (macro definition)
    157: #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
    223: struct lance_tx_desc btx_ring[TX_RING_SIZE];
    268: char *tx_buf_ptr_cpu[TX_RING_SIZE];
    272: uint tx_buf_ptr_lnc[TX_RING_SIZE];
    495: for (i = 0; i < TX_RING_SIZE; i++) {    (in lance_init_ring())
    1094: for (i = 0; i < TX_RING_SIZE; i++) {    (in dec_lance_probe())
    1139: for (i = 0; i < TX_RING_SIZE; i++) {    (in dec_lance_probe())
    1170: for (i = 0; i < TX_RING_SIZE; i++) {    (in dec_lance_probe())
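The LANCE-family drivers above size the ring as 1 << LANCE_LOG_TX_BUFFERS and keep running cur_tx/dirty_tx counters: the counter masked with TX_RING_MOD_MASK names the slot, the completion path walks dirty_tx forward, out-of-sync bookkeeping is patched by adding TX_RING_SIZE to dirty_tx, and atarilance/ariadne additionally rebase both counters in their transmit paths. A minimal stand-alone sketch of that accounting, using illustrative structure and field names rather than the drivers' own:

    #include <stdio.h>

    #define LANCE_LOG_TX_BUFFERS 4
    #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
    #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)

    /* Hypothetical stand-in for the driver's private state. */
    struct lance_like {
            unsigned int cur_tx;             /* next slot to fill (running count)    */
            unsigned int dirty_tx;           /* next slot to reclaim (running count) */
            int done[TX_RING_SIZE];          /* fake "hardware finished" flags       */
    };

    /* Queue one packet: the running counter masked with TX_RING_MOD_MASK
     * selects the ring slot. */
    static int enqueue(struct lance_like *lp)
    {
            if (lp->cur_tx - lp->dirty_tx >= TX_RING_SIZE)
                    return -1;                           /* ring full */
            int entry = lp->cur_tx & TX_RING_MOD_MASK;
            lp->done[entry] = 0;                         /* slot now owned by hardware */
            lp->cur_tx++;
            return entry;
    }

    /* Reclaim completed slots, in the style of the lance_interrupt() handlers. */
    static void reclaim(struct lance_like *lp)
    {
            unsigned int dirty_tx = lp->dirty_tx;

            while (dirty_tx != lp->cur_tx && lp->done[dirty_tx & TX_RING_MOD_MASK])
                    dirty_tx++;

            /* Defensive fix-up seen in lance.c/atarilance.c: if more than a
             * full ring ever appears outstanding, resynchronize dirty_tx. */
            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE)
                    dirty_tx += TX_RING_SIZE;

            lp->dirty_tx = dirty_tx;
    }

    int main(void)
    {
            struct lance_like lp = { 0 };

            for (int i = 0; i < TX_RING_SIZE - 1; i++)
                    enqueue(&lp);
            lp.done[0] = 1;                              /* pretend slot 0 completed */
            reclaim(&lp);
            printf("cur_tx=%u dirty_tx=%u outstanding=%u\n",
                   lp.cur_tx, lp.dirty_tx, lp.cur_tx - lp.dirty_tx);
            return 0;
    }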
/linux/drivers/net/ethernet/packetengines/

yellowfin.c
    73: #define TX_RING_SIZE 16    (macro definition)
    312: struct sk_buff* tx_skbuff[TX_RING_SIZE];
    705: for (i = 0; i < TX_RING_SIZE; i++)    (in yellowfin_tx_timeout())
    766: for (i = 0; i < TX_RING_SIZE; i++) {    (in yellowfin_init_ring())
    777: for (i = 0; i < TX_RING_SIZE; i++) {    (in yellowfin_init_ring())
    826: entry = yp->cur_tx % TX_RING_SIZE;    (in yellowfin_start_xmit())
    847: if (entry >= TX_RING_SIZE-1) {    (in yellowfin_start_xmit())
    1008: dirty_tx += TX_RING_SIZE;    (in yellowfin_interrupt())
    1231: for (i = 0; i < TX_RING_SIZE*2; i++)    (in yellowfin_close())
    1237: for (i = 0; i < TX_RING_SIZE; i++)    (in yellowfin_close())
    [all …]

hamachi.c
    119: #define TX_RING_SIZE 64    (macro definition)
    487: struct sk_buff* tx_skbuff[TX_RING_SIZE];
    1017: if (entry >= TX_RING_SIZE-1)    (in hamachi_tx())
    1067: for (i = 0; i < TX_RING_SIZE; i++)    (in hamachi_tx_timeout())
    1090: for (i = 0; i < TX_RING_SIZE; i++){    (in hamachi_tx_timeout())
    1093: if (i >= TX_RING_SIZE - 1)    (in hamachi_tx_timeout())
    1206: for (i = 0; i < TX_RING_SIZE; i++) {    (in hamachi_init_ring())
    1243: entry = hmp->cur_tx % TX_RING_SIZE;    (in hamachi_start_xmit())
    1357: if (entry >= TX_RING_SIZE-1)    (in hamachi_interrupt())
    1688: for (i = 0; i < TX_RING_SIZE; i++)    (in hamachi_close())
    [all …]
/linux/drivers/net/ethernet/pasemi/

pasemi_mac.h
    19: #define TX_RING_SIZE 4096    (macro definition)
    20: #define CS_RING_SIZE (TX_RING_SIZE*2)
    94: #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
    95: #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
/linux/drivers/net/ethernet/dlink/

dl2k.h
    35: #define TX_RING_SIZE 256    (macro definition)
    36: #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
    38: #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
    369: struct sk_buff *tx_skbuff[TX_RING_SIZE];

sundance.c
    65: #define TX_RING_SIZE 32    (macro definition)
    366: struct sk_buff* tx_skbuff[TX_RING_SIZE];
    977: for (i=0; i<TX_RING_SIZE; i++) {    (in tx_timeout())
    990: np->cur_tx, np->cur_tx % TX_RING_SIZE,    (in tx_timeout())
    1055: for (i = 0; i < TX_RING_SIZE; i++) {    (in init_ring())
    1070: int entry = np->cur_task % TX_RING_SIZE;    (in tx_poll())
    1094: entry = np->cur_tx % TX_RING_SIZE;    (in start_tx())
    1148: for (i = 0; i < TX_RING_SIZE; i++) {    (in reset_tx())
    1269: TX_RING_SIZE)    (in intr_handler())
    1871: for (i = 0; i < TX_RING_SIZE; i++)    (in netdev_close())
    [all …]

dl2k.c
    210: else if (tx_coalesce > TX_RING_SIZE-1)    (in rio_probe1())
    211: tx_coalesce = TX_RING_SIZE - 1;    (in rio_probe1())
    450: for (i = 0; i < TX_RING_SIZE; i++) {    (in free_list())
    471: for (i = 0; i < TX_RING_SIZE; i++)    (in rio_reset_ring())
    488: for (i = 0; i < TX_RING_SIZE; i++) {    (in alloc_list())
    491: ((i + 1) % TX_RING_SIZE) *    (in alloc_list())
    718: entry = np->cur_tx % TX_RING_SIZE;    (in start_xmit())
    755: if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE    (in start_xmit())
    816: int entry = np->old_tx % TX_RING_SIZE;    (in rio_free_tx())
    841: entry = (entry + 1) % TX_RING_SIZE;    (in rio_free_tx())
    [all …]
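dl2k sizes the ring at 256 entries but caps use at TX_QUEUE_LEN = TX_RING_SIZE - 1, chains descriptor i to descriptor (i + 1) % TX_RING_SIZE in alloc_list(), and gauges occupancy in start_xmit() with (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE. The sketch below reproduces those two pieces with illustrative names and a fake bus address; it is not the driver's own descriptor layout.

    #include <stdint.h>
    #include <stdio.h>

    #define TX_RING_SIZE 256
    #define TX_QUEUE_LEN (TX_RING_SIZE - 1)      /* keep one entry unused */

    /* Illustrative descriptor; a real driver stores DMA (bus) addresses here. */
    struct desc {
            uint64_t next_desc;                  /* bus address of the next descriptor */
            uint64_t status;
    };

    static struct desc tx_ring[TX_RING_SIZE];

    /* Build a circular chain at init time, as dl2k's alloc_list() does:
     * entry i points at entry (i + 1) % TX_RING_SIZE.  ring_dma stands in
     * for the bus address of tx_ring[0]. */
    static void chain_ring(uint64_t ring_dma)
    {
            for (int i = 0; i < TX_RING_SIZE; i++)
                    tx_ring[i].next_desc = ring_dma +
                            ((i + 1) % TX_RING_SIZE) * sizeof(struct desc);
    }

    /* Occupancy test in the style of dl2k's start_xmit(); correct as long as
     * at most TX_QUEUE_LEN entries are ever outstanding. */
    static unsigned int entries_in_use(unsigned int cur_tx, unsigned int old_tx)
    {
            return (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE;
    }

    int main(void)
    {
            chain_ring(0x10000000ull);
            printf("last slot chains back to 0x%llx\n",
                   (unsigned long long)tx_ring[TX_RING_SIZE - 1].next_desc);
            printf("in use: %u of at most %d\n",
                   entries_in_use(3, 250), TX_QUEUE_LEN);
            return 0;
    }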
/linux/drivers/net/ethernet/dec/tulip/

tulip_core.c
    587: for (i = 0; i < TX_RING_SIZE; i++)    (in tulip_tx_timeout())
    643: for (i = 0; i < TX_RING_SIZE; i++) {    (in tulip_init_ring())
    664: entry = tp->cur_tx % TX_RING_SIZE;    (in tulip_start_xmit())
    682: if (entry == TX_RING_SIZE-1)    (in tulip_start_xmit())
    707: int entry = dirty_tx % TX_RING_SIZE;    (in tulip_clean_tx_ring())
    805: for (i = 0; i < TX_RING_SIZE; i++) {    (in tulip_free_ring())
    1137: entry = tp->cur_tx++ % TX_RING_SIZE;    (in set_rx_mode())
    1148: entry = tp->cur_tx++ % TX_RING_SIZE;    (in set_rx_mode())
    1159: if (entry == TX_RING_SIZE-1)    (in set_rx_mode())
    1776: sizeof(struct tulip_tx_desc) * TX_RING_SIZE,    (in tulip_init_one())
    [all …]

interrupt.c
    533: int maxtx = TX_RING_SIZE;    (in tulip_interrupt())
    534: int maxoi = TX_RING_SIZE;    (in tulip_interrupt())
    597: int entry = dirty_tx % TX_RING_SIZE;    (in tulip_interrupt())
    652: if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {    (in tulip_interrupt())
    656: dirty_tx += TX_RING_SIZE;    (in tulip_interrupt())
    660: if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)    (in tulip_interrupt())

winbond-840.c
    288: dma_addr_t tx_addr[TX_RING_SIZE];
    293: struct sk_buff* tx_skbuff[TX_RING_SIZE];
    818: for (i = 0; i < TX_RING_SIZE; i++) {    (in init_rxtx_rings())
    845: for (i = 0; i < TX_RING_SIZE; i++) {    (in free_rxtx_rings())
    929: for (i = 0; i < TX_RING_SIZE; i++)    (in tx_timeout())
    968: sizeof(struct w840_tx_desc) * TX_RING_SIZE,    (in alloc_ringdesc())
    980: sizeof(struct w840_tx_desc) * TX_RING_SIZE,    (in free_ringdesc())
    994: entry = np->cur_tx % TX_RING_SIZE;    (in start_tx())
    1009: if(entry == TX_RING_SIZE-1)    (in start_tx())
    1051: int entry = np->dirty_tx % TX_RING_SIZE;    (in netdev_tx_done())
    [all …]
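Both tulip_start_xmit() and winbond-840's start_tx() special-case entry == TX_RING_SIZE-1: the last descriptor in the ring carries an extra flag so the chip's DMA engine wraps back to descriptor 0 instead of running off the end of the array. A stand-alone sketch of that pattern follows; the descriptor layout, the end-of-ring bit, and the "owned by hardware" bit are illustrative stand-ins, not the tulip or w840 definitions.

    #include <stdint.h>
    #include <string.h>

    #define TX_RING_SIZE 32

    /* Illustrative end-of-ring and ownership bits (not the real chips' values). */
    #define DESC_END_OF_RING (1u << 25)
    #define DESC_OWN_HW      (1u << 31)

    struct tx_desc {
            uint32_t status;
            uint32_t flags;
            uint32_t buf_addr;
            uint32_t pad;
    };

    static struct tx_desc tx_ring[TX_RING_SIZE];
    static unsigned int cur_tx;

    /* Queue one buffer: the slot is cur_tx % TX_RING_SIZE, and the very last
     * slot additionally gets the end-of-ring flag so the DMA engine loops. */
    static void queue_tx(uint32_t buf_bus_addr, uint32_t len)
    {
            unsigned int entry = cur_tx % TX_RING_SIZE;
            uint32_t flags = len;

            if (entry == TX_RING_SIZE - 1)
                    flags |= DESC_END_OF_RING;

            tx_ring[entry].buf_addr = buf_bus_addr;
            tx_ring[entry].flags = flags;
            tx_ring[entry].status = DESC_OWN_HW;    /* hand the slot to the NIC */
            cur_tx++;
    }

    int main(void)
    {
            memset(tx_ring, 0, sizeof(tx_ring));
            for (unsigned int i = 0; i < TX_RING_SIZE; i++)
                    queue_tx(0x20000000u + i * 2048, 1514);
            /* Only the final slot should carry the end-of-ring marker. */
            return (tx_ring[TX_RING_SIZE - 1].flags & DESC_END_OF_RING) ? 0 : 1;
    }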
/linux/drivers/net/ethernet/3com/

3c515.c
    51: #define TX_RING_SIZE 16    (macro definition)
    300: struct boom_tx_desc tx_ring[TX_RING_SIZE];
    303: struct sk_buff *tx_skbuff[TX_RING_SIZE];
    833: for (i = 0; i < TX_RING_SIZE; i++)    (in corkscrew_open())
    971: for (i = 0; i < TX_RING_SIZE; i++) {    (in corkscrew_timeout())
    1001: int entry = vp->cur_tx % TX_RING_SIZE;    (in corkscrew_start_xmit())
    1009: prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];    (in corkscrew_start_xmit())
    1039: if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)    (in corkscrew_start_xmit())
    1164: int entry = dirty_tx % TX_RING_SIZE;    (in corkscrew_interrupt())
    1175: if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {    (in corkscrew_interrupt())
    [all …]

3c59x.c
    38: #define TX_RING_SIZE 16    (macro definition)
    603: struct sk_buff* tx_skbuff[TX_RING_SIZE];
    1478: sizeof(struct boom_tx_desc) * TX_RING_SIZE,    (in vortex_probe1())
    1685: for (i = 0; i < TX_RING_SIZE; i++)    (in vortex_up())
    2116: int entry = vp->cur_tx % TX_RING_SIZE;    (in boomerang_start_xmit())
    2422: int entry = dirty_tx % TX_RING_SIZE;    (in _boomerang_interrupt())
    2764: for (i = 0; i < TX_RING_SIZE; i++) {    (in vortex_close())
    2800: vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,    (in dump_tx_ring())
    2801: vp->cur_tx, vp->cur_tx % TX_RING_SIZE);    (in dump_tx_ring())
    2806: for (i = 0; i < TX_RING_SIZE; i++) {    (in dump_tx_ring())
    [all …]
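The prev_entry line in corkscrew_start_xmit() hints at how the 3Com corkscrew/boomerang path queues packets: the ring entries form a download list, and a freshly filled descriptor is attached behind the previously queued one. The sketch below shows that general append-behind-the-previous-entry idea with invented names and bus-address arithmetic; it is a simplified illustration, not the drivers' actual descriptor format or locking.

    #include <stdint.h>

    #define TX_RING_SIZE 16

    /* Invented, simplified download descriptor. */
    struct dn_desc {
            uint32_t next;           /* bus address of the next descriptor, 0 = list end */
            uint32_t frame_start;    /* flags + length */
            uint32_t buf_addr;
            uint32_t buf_len;
    };

    static struct dn_desc tx_ring[TX_RING_SIZE];
    static const uint32_t tx_ring_dma = 0x30000000u;    /* pretend bus address of tx_ring[0] */
    static unsigned int cur_tx;                         /* running count, like vp->cur_tx */

    /* Fill the new slot completely, then link it behind the previous entry
     * ((cur_tx - 1) % TX_RING_SIZE), so the NIC never follows a pointer to a
     * half-written descriptor. */
    static void append_tx(uint32_t buf_bus_addr, uint32_t len)
    {
            unsigned int entry = cur_tx % TX_RING_SIZE;
            struct dn_desc *prev = &tx_ring[(cur_tx - 1) % TX_RING_SIZE];
            struct dn_desc *d = &tx_ring[entry];

            d->next = 0;                                /* new tail of the list */
            d->buf_addr = buf_bus_addr;
            d->buf_len = len;
            d->frame_start = len;

            prev->next = (uint32_t)(tx_ring_dma + entry * sizeof(struct dn_desc));
            cur_tx++;
    }

    int main(void)
    {
            append_tx(0x40000000u, 1514);
            append_tx(0x40001000u, 60);
            return tx_ring[0].next != 0 ? 0 : 1;        /* slot 0 should now point at slot 1 */
    }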
/linux/drivers/net/ethernet/adaptec/

starfire.c
    121: #define TX_RING_SIZE 32    (macro definition)
    531: struct tx_ring_info tx_info[TX_RING_SIZE];
    1173: for (i = 0; i < TX_RING_SIZE; i++)    (in init_ring())
    1203: entry = np->cur_tx % TX_RING_SIZE;    (in start_tx())
    1211: if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {    (in start_tx())
    1250: np->tx_info[entry].used_slots = TX_RING_SIZE - entry;    (in start_tx())
    1259: if (np->cur_tx % (TX_RING_SIZE / 2) == 0)    (in start_tx())
    1272: if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)    (in start_tx())
    1278: entry = prev_tx % TX_RING_SIZE;    (in start_tx())
    1391: (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE))    (in intr_handler())
    [all …]
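starfire's start_tx() has to place a scatter-gather packet that may need several consecutive descriptor slots, so it checks both overall occupancy (cur_tx - dirty_tx) and whether the fragments still fit before the end of the ring (entry >= TX_RING_SIZE - skb_num_frags(skb)), recording used_slots when they reach the wrap. The helper below illustrates those two admission checks in isolation, with invented names; it is not starfire's actual bookkeeping.

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_RING_SIZE 32

    /* Can a packet needing nfrags consecutive descriptors be queued?
     * allow_straddle = false models hardware that must not split one packet
     * across the ring's wrap point. */
    static bool can_queue(unsigned int cur_tx, unsigned int dirty_tx,
                          unsigned int nfrags, bool allow_straddle)
    {
            unsigned int entry = cur_tx % TX_RING_SIZE;

            if ((cur_tx - dirty_tx) + nfrags > TX_RING_SIZE)
                    return false;                       /* not enough free slots overall */
            if (!allow_straddle && entry > TX_RING_SIZE - nfrags)
                    return false;                       /* fragments would cross the wrap */
            return true;
    }

    int main(void)
    {
            /* 20 packets outstanding, 4 fragments, producer at slot 30. */
            printf("%d\n", can_queue(30, 10, 4, false));    /* 0: would cross the wrap */
            printf("%d\n", can_queue(30, 10, 4, true));     /* 1: allowed to straddle   */
            return 0;
    }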
/linux/drivers/net/ethernet/via/

via-rhine.c
    77: #define TX_RING_SIZE 64    (macro definition)
    446: struct sk_buff *tx_skbuff[TX_RING_SIZE];
    447: dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
    450: unsigned char *tx_buf[TX_RING_SIZE];
    1147: TX_RING_SIZE * sizeof(struct tx_desc),    (in alloc_ring())
    1156: PKT_BUF_SZ * TX_RING_SIZE,    (in alloc_ring())
    1183: TX_RING_SIZE * sizeof(struct tx_desc),    (in free_ring())
    1309: for (i = 0; i < TX_RING_SIZE; i++) {    (in alloc_tbufs())
    1329: for (i = 0; i < TX_RING_SIZE; i++) {    (in free_tbufs())
    1781: entry = rp->cur_tx % TX_RING_SIZE;    (in rhine_start_tx())
    [all …]
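via-rhine's alloc_ring()/free_ring() size their coherent DMA allocations from TX_RING_SIZE: one block of TX_RING_SIZE * sizeof(struct tx_desc) for descriptors and one of PKT_BUF_SZ * TX_RING_SIZE for per-slot bounce buffers. A kernel-context sketch of that allocation pattern follows; struct tx_desc, PKT_BUF_SZ, struct rhine_like and its fields are assumptions for illustration (the real driver allocates its RX and TX descriptors together and differs in other details).

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/types.h>

    #define TX_RING_SIZE 64
    #define PKT_BUF_SZ   1536

    /* Simplified descriptor; the real via-rhine layout differs. */
    struct tx_desc {
            __le32 tx_status;
            __le32 desc_length;
            __le32 addr;
            __le32 next_desc;
    };

    struct rhine_like {
            struct device *dev;          /* e.g. &pdev->dev in a PCI driver */
            struct tx_desc *tx_ring;     /* CPU view of the descriptor ring */
            dma_addr_t tx_ring_dma;      /* bus address programmed into the NIC */
            unsigned char *tx_bufs;      /* bounce buffers, one PKT_BUF_SZ slot each */
            dma_addr_t tx_bufs_dma;
    };

    static int alloc_tx_ring(struct rhine_like *rp)
    {
            rp->tx_ring = dma_alloc_coherent(rp->dev,
                                             TX_RING_SIZE * sizeof(struct tx_desc),
                                             &rp->tx_ring_dma, GFP_KERNEL);
            if (!rp->tx_ring)
                    return -ENOMEM;

            rp->tx_bufs = dma_alloc_coherent(rp->dev, PKT_BUF_SZ * TX_RING_SIZE,
                                             &rp->tx_bufs_dma, GFP_KERNEL);
            if (!rp->tx_bufs) {
                    dma_free_coherent(rp->dev,
                                      TX_RING_SIZE * sizeof(struct tx_desc),
                                      rp->tx_ring, rp->tx_ring_dma);
                    rp->tx_ring = NULL;
                    return -ENOMEM;
            }
            return 0;
    }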
/linux/drivers/net/ethernet/smsc/

smsc9420.c
    497: for (i = 0; i < TX_RING_SIZE; i++) {    (in smsc9420_free_tx_ring())
    920: pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE;    (in smsc9420_complete_tx())
    932: (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail);    (in smsc9420_hard_start_xmit())
    959: if (unlikely(index == (TX_RING_SIZE - 1)))    (in smsc9420_hard_start_xmit())
    967: pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE;    (in smsc9420_hard_start_xmit())
    1182: pd->tx_buffers = kmalloc_array(TX_RING_SIZE,    (in smsc9420_alloc_tx_ring())
    1189: for (i = 0; i < TX_RING_SIZE; i++) {    (in smsc9420_alloc_tx_ring())
    1197: pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_;    (in smsc9420_alloc_tx_ring())
    1550: sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),    (in smsc9420_probe())
    1608: sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),    (in smsc9420_probe())
    [all …]

epic100.c
    53: #define TX_RING_SIZE 256    (macro definition)
    56: #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
    256: struct sk_buff* tx_skbuff[TX_RING_SIZE];
    815: (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));    (in epic_restart())
    937: for (i = 0; i < TX_RING_SIZE; i++) {    (in epic_init_ring())
    963: entry = ep->cur_tx % TX_RING_SIZE;    (in epic_start_xmit())
    1033: int entry = dirty_tx % TX_RING_SIZE;    (in epic_tx())
    1056: if (cur_tx - dirty_tx > TX_RING_SIZE) {    (in epic_tx())
    1059: dirty_tx += TX_RING_SIZE;    (in epic_tx())
    1316: for (i = 0; i < TX_RING_SIZE; i++) {    (in epic_close())
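smsc9420 runs its TX ring with an explicit head/tail pair: the transmit path advances tx_ring_head modulo TX_RING_SIZE, the completion path advances tx_ring_tail the same way, the queue is stopped while (head + 2) % TX_RING_SIZE equals tail, and the final descriptor is pre-marked as end-of-ring (TDES1_TER_) so the DMA engine wraps. The stand-alone sketch below reproduces just the head/tail arithmetic, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_RING_SIZE 32

    /* Producer owns head, completion path owns tail; both move forward
     * modulo the ring size.  The "+ 2" test stops the producer while a
     * small gap still remains, so head can never catch up with tail. */
    struct txq {
            unsigned int head;      /* next slot to fill    */
            unsigned int tail;      /* next slot to reclaim */
    };

    static bool about_to_be_full(const struct txq *q)
    {
            return ((q->head + 2) % TX_RING_SIZE) == q->tail;
    }

    static void produce(struct txq *q)
    {
            q->head = (q->head + 1) % TX_RING_SIZE;
    }

    static void complete(struct txq *q)
    {
            q->tail = (q->tail + 1) % TX_RING_SIZE;
    }

    int main(void)
    {
            struct txq q = { .head = 0, .tail = 0 };
            int queued = 0;

            while (!about_to_be_full(&q)) {         /* fill until the stop condition */
                    produce(&q);
                    queued++;
            }
            printf("queued %d of %d slots before stopping\n", queued, TX_RING_SIZE);

            complete(&q);                           /* one completion frees a slot */
            printf("still full? %d\n", about_to_be_full(&q));
            return 0;
    }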