Home
last modified time | relevance | path

Searched refs: TX_RING_SIZE (Results 1 - 25 of 100) sorted by relevance

1234

/kernel/linux/linux-5.10/drivers/net/ethernet/sun/
H A Dsungem.h883 #define TX_RING_SIZE 128 macro
886 #if TX_RING_SIZE == 32
888 #elif TX_RING_SIZE == 64
890 #elif TX_RING_SIZE == 128
892 #elif TX_RING_SIZE == 256
894 #elif TX_RING_SIZE == 512
896 #elif TX_RING_SIZE == 1024
898 #elif TX_RING_SIZE == 2048
900 #elif TX_RING_SIZE == 4096
902 #elif TX_RING_SIZE
[all...]
H A Dsunhme.h331 #define TX_RING_SIZE 32 /* Must be >16 and <255, multiple of 16 */ macro
334 #if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
335 #error TX_RING_SIZE holds illegal value
361 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
363 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
367 (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
418 struct sk_buff *tx_skbs[TX_RING_SIZE];
H A Dsunbmac.h251 #define TX_RING_SIZE 256 macro
255 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
257 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
261 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
300 struct sk_buff *tx_skbs[TX_RING_SIZE];
/kernel/linux/linux-6.6/drivers/net/ethernet/sun/
H A Dsungem.h883 #define TX_RING_SIZE 128 macro
886 #if TX_RING_SIZE == 32
888 #elif TX_RING_SIZE == 64
890 #elif TX_RING_SIZE == 128
892 #elif TX_RING_SIZE == 256
894 #elif TX_RING_SIZE == 512
896 #elif TX_RING_SIZE == 1024
898 #elif TX_RING_SIZE == 2048
900 #elif TX_RING_SIZE == 4096
902 #elif TX_RING_SIZE
[all...]
H A Dsunhme.h331 #define TX_RING_SIZE 32 /* Must be >16 and <255, multiple of 16 */ macro
334 #if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
335 #error TX_RING_SIZE holds illegal value
361 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
363 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
367 (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
418 struct sk_buff *tx_skbs[TX_RING_SIZE];
H A Dsunbmac.h251 #define TX_RING_SIZE 256 macro
255 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
257 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
261 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
300 struct sk_buff *tx_skbs[TX_RING_SIZE];
/kernel/linux/linux-5.10/drivers/net/ethernet/amd/
H A Dariadne.c80 #define TX_RING_SIZE 5 macro
88 volatile struct TDRE *tx_ring[TX_RING_SIZE];
90 volatile u_short *tx_buff[TX_RING_SIZE];
100 struct TDRE tx_ring[TX_RING_SIZE];
102 u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
129 for (i = 0; i < TX_RING_SIZE; i++) { in ariadne_init_ring()
309 int entry = dirty_tx % TX_RING_SIZE; in ariadne_interrupt()
345 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { in ariadne_interrupt()
349 dirty_tx += TX_RING_SIZE; in ariadne_interrupt()
354 dirty_tx > priv->cur_tx - TX_RING_SIZE in ariadne_interrupt()
[all...]
H A D7990.h39 #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) macro
41 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
87 volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
90 volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
H A Datarilance.c111 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
113 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
157 struct lance_tx_head tx_head[TX_RING_SIZE];
705 for( i = 0; i < TX_RING_SIZE; i++ ) { in lance_init_ring()
756 for( i = 0 ; i < TX_RING_SIZE; i++ ) in lance_tx_timeout()
830 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { in lance_start_xmit()
831 lp->cur_tx -= TX_RING_SIZE; in lance_start_xmit()
832 lp->dirty_tx -= TX_RING_SIZE; in lance_start_xmit()
921 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
[all...]
H A Dlance.c193 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
194 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
238 struct lance_tx_head tx_ring[TX_RING_SIZE];
242 struct sk_buff* tx_skbuff[TX_RING_SIZE];
559 lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ, in lance_probe1()
850 for (i = 0; i < TX_RING_SIZE; i++) { in lance_purge_ring()
887 for (i = 0; i < TX_RING_SIZE; i++) { in lance_init_ring()
936 for (i = 0; i < TX_RING_SIZE; i++) in lance_tx_timeout()
1011 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) in lance_start_xmit()
1094 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/amd/
H A Dariadne.c80 #define TX_RING_SIZE 5 macro
88 volatile struct TDRE *tx_ring[TX_RING_SIZE];
90 volatile u_short *tx_buff[TX_RING_SIZE];
100 struct TDRE tx_ring[TX_RING_SIZE];
102 u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
129 for (i = 0; i < TX_RING_SIZE; i++) { in ariadne_init_ring()
309 int entry = dirty_tx % TX_RING_SIZE; in ariadne_interrupt()
345 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { in ariadne_interrupt()
349 dirty_tx += TX_RING_SIZE; in ariadne_interrupt()
354 dirty_tx > priv->cur_tx - TX_RING_SIZE in ariadne_interrupt()
[all...]
H A D7990.h39 #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) macro
41 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
87 volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
90 volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
H A Datarilance.c111 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
113 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
157 struct lance_tx_head tx_head[TX_RING_SIZE];
704 for( i = 0; i < TX_RING_SIZE; i++ ) { in lance_init_ring()
755 for( i = 0 ; i < TX_RING_SIZE; i++ ) in lance_tx_timeout()
829 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { in lance_start_xmit()
830 lp->cur_tx -= TX_RING_SIZE; in lance_start_xmit()
831 lp->dirty_tx -= TX_RING_SIZE; in lance_start_xmit()
920 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
[all...]
H A Dlance.c194 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
195 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
239 struct lance_tx_head tx_ring[TX_RING_SIZE];
243 struct sk_buff* tx_skbuff[TX_RING_SIZE];
564 lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ, in lance_probe1()
855 for (i = 0; i < TX_RING_SIZE; i++) { in lance_purge_ring()
892 for (i = 0; i < TX_RING_SIZE; i++) { in lance_init_ring()
941 for (i = 0; i < TX_RING_SIZE; i++) in lance_tx_timeout()
1016 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) in lance_start_xmit()
1099 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
[all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/pasemi/
H A Dpasemi_mac.h19 #define TX_RING_SIZE 4096 macro
20 #define CS_RING_SIZE (TX_RING_SIZE*2)
94 #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
95 #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
/kernel/linux/linux-6.6/drivers/net/ethernet/pasemi/
H A Dpasemi_mac.h19 #define TX_RING_SIZE 4096 macro
20 #define CS_RING_SIZE (TX_RING_SIZE*2)
94 #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
95 #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
/kernel/linux/linux-5.10/drivers/net/ethernet/packetengines/
H A Dyellowfin.c73 #define TX_RING_SIZE 16 macro
74 #define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
76 #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
77 #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
157 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
312 struct sk_buff* tx_skbuff[TX_RING_SIZE];
703 for (i = 0; i < TX_RING_SIZE; i++) in yellowfin_tx_timeout()
764 for (i = 0; i < TX_RING_SIZE; i++) { in yellowfin_init_ring()
768 ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc)); in yellowfin_init_ring()
775 for (i = 0; i < TX_RING_SIZE; in yellowfin_init_ring()
[all...]
H A Dhamachi.c119 #define TX_RING_SIZE 64 macro
121 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
232 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
487 struct sk_buff* tx_skbuff[TX_RING_SIZE];
998 int entry = hmp->dirty_tx % TX_RING_SIZE; in hamachi_tx()
1013 if (entry >= TX_RING_SIZE-1) in hamachi_tx()
1014 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= in hamachi_tx()
1063 for (i = 0; i < TX_RING_SIZE; i++) in hamachi_tx_timeout()
1086 for (i = 0; i < TX_RING_SIZE; i++){ in hamachi_tx_timeout()
1089 if (i >= TX_RING_SIZE in hamachi_tx_timeout()
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/packetengines/
H A Dyellowfin.c73 #define TX_RING_SIZE 16 macro
74 #define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
76 #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
77 #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
157 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
312 struct sk_buff* tx_skbuff[TX_RING_SIZE];
705 for (i = 0; i < TX_RING_SIZE; i++) in yellowfin_tx_timeout()
766 for (i = 0; i < TX_RING_SIZE; i++) { in yellowfin_init_ring()
770 ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc)); in yellowfin_init_ring()
777 for (i = 0; i < TX_RING_SIZE; in yellowfin_init_ring()
[all...]
H A Dhamachi.c119 #define TX_RING_SIZE 64 macro
121 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
232 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
487 struct sk_buff* tx_skbuff[TX_RING_SIZE];
1002 int entry = hmp->dirty_tx % TX_RING_SIZE; in hamachi_tx()
1017 if (entry >= TX_RING_SIZE-1) in hamachi_tx()
1018 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= in hamachi_tx()
1067 for (i = 0; i < TX_RING_SIZE; i++) in hamachi_tx_timeout()
1090 for (i = 0; i < TX_RING_SIZE; i++){ in hamachi_tx_timeout()
1093 if (i >= TX_RING_SIZE in hamachi_tx_timeout()
[all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/dlink/
H A Ddl2k.h35 #define TX_RING_SIZE 256 macro
36 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
38 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
369 struct sk_buff *tx_skbuff[TX_RING_SIZE];
H A Dsundance.c65 #define TX_RING_SIZE 32 macro
66 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
69 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
128 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
366 struct sk_buff* tx_skbuff[TX_RING_SIZE];
975 for (i=0; i<TX_RING_SIZE; i++) { in tx_timeout()
988 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
989 np->dirty_tx, np->dirty_tx % TX_RING_SIZE); in tx_timeout()
1053 for (i = 0; i < TX_RING_SIZE; i++) { in init_ring()
1062 unsigned head = np->cur_task % TX_RING_SIZE; in tx_poll()
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/dlink/
H A Ddl2k.h35 #define TX_RING_SIZE 256 macro
36 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
38 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
369 struct sk_buff *tx_skbuff[TX_RING_SIZE];
H A Dsundance.c65 #define TX_RING_SIZE 32 macro
66 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
69 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
128 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
366 struct sk_buff* tx_skbuff[TX_RING_SIZE];
977 for (i=0; i<TX_RING_SIZE; i++) { in tx_timeout()
990 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
991 np->dirty_tx, np->dirty_tx % TX_RING_SIZE); in tx_timeout()
1055 for (i = 0; i < TX_RING_SIZE; i++) { in init_ring()
1064 unsigned head = np->cur_task % TX_RING_SIZE; in tx_poll()
[all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/dec/tulip/
H A Dtulip_core.c587 for (i = 0; i < TX_RING_SIZE; i++) in tulip_tx_timeout()
643 for (i = 0; i < TX_RING_SIZE; i++) { in tulip_init_ring()
664 entry = tp->cur_tx % TX_RING_SIZE; in tulip_start_xmit()
672 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ in tulip_start_xmit()
674 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) { in tulip_start_xmit()
676 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) { in tulip_start_xmit()
682 if (entry == TX_RING_SIZE-1) in tulip_start_xmit()
707 int entry = dirty_tx % TX_RING_SIZE; in tulip_clean_tx_ring()
805 for (i = 0; i < TX_RING_SIZE; i++) { in tulip_free_ring()
1129 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE in set_rx_mode()
[all...]

Completed in 39 milliseconds

1234