/kernel/linux/linux-6.6/drivers/net/ethernet/google/gve/

gve_utils.c
    71   page_info->pagecnt_bias--;  in gve_dec_pagecnt_bias()
    72   if (page_info->pagecnt_bias == 0) {  in gve_dec_pagecnt_bias()
    78   page_info->pagecnt_bias = INT_MAX - pagecount;  in gve_dec_pagecnt_bias()

gve_rx.c
    22   page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);  in gve_rx_free_buffer()
    38   rx->data.page_info[i].pagecnt_bias - 1);  in gve_rx_unfill_pages()
    44   rx->qpl_copy_pool[i].pagecnt_bias - 1);  in gve_rx_unfill_pages()
    91   page_info->pagecnt_bias = INT_MAX;  in gve_setup_rx_buffer()
    167  rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;  in gve_prefill_rx_pages()
    177  rx->qpl_copy_pool[j].pagecnt_bias - 1);  in gve_prefill_rx_pages()
    186  rx->data.page_info[i].pagecnt_bias - 1);  in gve_prefill_rx_pages()
    420  if (pagecount == page_info->pagecnt_bias)  in gve_rx_can_recycle_buffer()
    423  else if (pagecount > page_info->pagecnt_bias)  in gve_rx_can_recycle_buffer()
    425  WARN(pagecount < page_info->pagecnt_bias,  in gve_rx_can_recycle_buffer()
    [all...]

gve_rx_dqo.c
    21   return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;  in gve_buf_ref_cnt()
    28   page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);  in gve_free_page_dqo()
    197  buf_state->page_info.pagecnt_bias = INT_MAX;  in gve_alloc_page_dqo()
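
The gve hits above trace one bookkeeping idea: the driver seeds pagecnt_bias near INT_MAX when it sets up a buffer, spends one count each time a fragment is handed up the stack, and treats page_count(page) - pagecnt_bias as the number of references it does not own (gve_buf_ref_cnt(), gve_rx_can_recycle_buffer()). Below is a minimal userspace sketch of that accounting, not the driver code; the struct and function names are invented for illustration and a plain int stands in for the atomic page refcount.

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct page and the driver's page_info. */
struct fake_page { int refcount; };                 /* models page->_refcount */
struct fake_page_info { struct fake_page *page; int pagecnt_bias; };

/* Models gve_setup_rx_buffer() setting pagecnt_bias = INT_MAX; assume the
 * page refcount was raised to match (one reference already held). */
static void setup_buffer(struct fake_page_info *pi, struct fake_page *p)
{
	p->refcount += INT_MAX - 1;
	pi->page = p;
	pi->pagecnt_bias = INT_MAX;
}

/* One bias count is spent per fragment given to the stack
 * (the pagecnt_bias-- seen in gve_dec_pagecnt_bias()). */
static void give_fragment_to_stack(struct fake_page_info *pi)
{
	pi->pagecnt_bias--;
}

/* References the driver does NOT own, as in gve_buf_ref_cnt():
 * 0 means the driver is the sole owner and may recycle the page. */
static int foreign_refs(const struct fake_page_info *pi)
{
	return pi->page->refcount - pi->pagecnt_bias;
}

int main(void)
{
	struct fake_page page = { .refcount = 1 };
	struct fake_page_info pi;

	setup_buffer(&pi, &page);
	give_fragment_to_stack(&pi);    /* stack now holds one "foreign" ref */
	page.refcount--;                /* stack drops its reference later   */
	printf("foreign refs: %d (0 => recyclable)\n", foreign_refs(&pi));
	return 0;
}
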
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ice/

ice_txrx.c
    405  __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);  in ice_clean_rx_ring()
    662  bi->pagecnt_bias = USHRT_MAX;  in ice_alloc_mapped_page()
    776  unsigned int pagecnt_bias = rx_buf->pagecnt_bias;  in ice_can_reuse_rx_page() local
    785  if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))  in ice_can_reuse_rx_page()
    795  * the pagecnt_bias and page count so that we fully restock the  in ice_can_reuse_rx_page()
    798  if (unlikely(pagecnt_bias == 1)) {  in ice_can_reuse_rx_page()
    800  rx_buf->pagecnt_bias = USHRT_MAX;  in ice_can_reuse_rx_page()
    862  new_buf->pagecnt_bias = old_buf->pagecnt_bias;  in ice_reuse_rx_page()
    [all...]

ice_txrx.h
    172  u16 pagecnt_bias;  member
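
The ice hits at 785-800 show the core of the reuse test most of these Intel drivers share: the page is only reused when the page count minus pagecnt_bias is at most 1 (nobody else still references it), and once the bias has been spent down to 1 the driver bulk-restores it to USHRT_MAX so that the shared page refcount is written roughly once per ~64K reuses instead of once per packet. A hedged sketch of that replenishment step follows; it is not the kernel function and omits the other checks the real ice_can_reuse_rx_page() makes (page origin, offsets on larger pages).

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustration only: fake_page models struct page's refcount,
 * fake_rx_buf models the driver's per-buffer bookkeeping. */
struct fake_page { unsigned int refcount; };
struct fake_rx_buf { struct fake_page *page; unsigned short pagecnt_bias; };

static bool can_reuse_rx_page(struct fake_rx_buf *buf)
{
	unsigned int pagecnt_bias = buf->pagecnt_bias;
	struct fake_page *page = buf->page;

	/* More than one reference not covered by the bias means the
	 * stack (or someone else) still owns part of the page. */
	if ((page->refcount - pagecnt_bias) > 1)
		return false;

	/* Bias nearly exhausted: take USHRT_MAX - 1 references in one go
	 * and reset the bias, so the shared refcount is touched rarely. */
	if (pagecnt_bias == 1) {
		page->refcount += USHRT_MAX - 1;   /* stands in for page_ref_add() */
		buf->pagecnt_bias = USHRT_MAX;
	}
	return true;
}

int main(void)
{
	struct fake_page page = { .refcount = 1 };
	struct fake_rx_buf buf = { .page = &page, .pagecnt_bias = 1 };

	printf("reusable: %d, bias now %u, refcount now %u\n",
	       can_reuse_rx_page(&buf), buf.pagecnt_bias, page.refcount);
	return 0;
}
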
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/iavf/

iavf_txrx.c
    694   __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);  in iavf_clean_rx_ring()
    851   /* initialize pagecnt_bias to 1 representing we fully own page */  in iavf_alloc_mapped_page()
    852   bi->pagecnt_bias = 1;  in iavf_alloc_mapped_page()
    1144  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in iavf_reuse_rx_page()
    1189  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in iavf_can_reuse_rx_page() local
    1198  if (unlikely((page_count(page) - pagecnt_bias) > 1))  in iavf_can_reuse_rx_page()
    1208  * the pagecnt_bias and page count so that we fully restock the  in iavf_can_reuse_rx_page()
    1211  if (unlikely(!pagecnt_bias)) {  in iavf_can_reuse_rx_page()
    [all...]

iavf_txrx.h
    282  __u16 pagecnt_bias;  member
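
Unlike the USHRT_MAX seeding seen in ice_alloc_mapped_page() above (i40e does the same further down), iavf_alloc_mapped_page() starts the bias at 1 (hits 851/852: "we fully own page") and only bulk-adds references the first time the bias is exhausted (hit 1211, the `if (unlikely(!pagecnt_bias))` branch). A small sketch of that lazier variant; the helper names are invented and a plain counter replaces the atomic page refcount.

#include <limits.h>
#include <stdio.h>

struct fake_page { unsigned int refcount; };
struct fake_rx_buf { struct fake_page *page; unsigned short pagecnt_bias; };

/* Allocation path: a fresh page comes back with one reference, and the
 * driver claims exactly that one ("initialize pagecnt_bias to 1
 * representing we fully own page"). */
static void alloc_mapped_page(struct fake_rx_buf *bi, struct fake_page *page)
{
	page->refcount = 1;
	bi->page = page;
	bi->pagecnt_bias = 1;
}

/* Reuse path: references are added in bulk only once the bias runs out,
 * mirroring the `if (unlikely(!pagecnt_bias))` branch above. */
static void maybe_restock_bias(struct fake_rx_buf *bi)
{
	if (!bi->pagecnt_bias) {
		bi->page->refcount += USHRT_MAX;   /* stands in for page_ref_add() */
		bi->pagecnt_bias = USHRT_MAX;
	}
}

int main(void)
{
	struct fake_page page;
	struct fake_rx_buf bi;

	alloc_mapped_page(&bi, &page);
	bi.pagecnt_bias--;                 /* first fragment handed to the stack */
	maybe_restock_bias(&bi);
	printf("bias %u, refcount %u\n", bi.pagecnt_bias, page.refcount);
	return 0;
}
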
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/iavf/

iavf_txrx.c
    725   __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);  in iavf_clean_rx_ring()
    882   /* initialize pagecnt_bias to 1 representing we fully own page */  in iavf_alloc_mapped_page()
    883   bi->pagecnt_bias = 1;  in iavf_alloc_mapped_page()
    1178  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in iavf_reuse_rx_page()
    1210  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in iavf_can_reuse_rx_page() local
    1219  if (unlikely((page_count(page) - pagecnt_bias) > 1))  in iavf_can_reuse_rx_page()
    1229  * the pagecnt_bias and page count so that we fully restock the  in iavf_can_reuse_rx_page()
    1232  if (unlikely(!pagecnt_bias)) {  in iavf_can_reuse_rx_page()
    [all...]

iavf_txrx.h
    282  __u16 pagecnt_bias;  member
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/

ice_txrx.c
    422  __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);  in ice_clean_rx_ring()
    726  bi->pagecnt_bias = USHRT_MAX;  in ice_alloc_mapped_page()
    830  unsigned int pagecnt_bias = rx_buf->pagecnt_bias;  in ice_can_reuse_rx_page() local
    839  if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))  in ice_can_reuse_rx_page()
    849  * the pagecnt_bias and page count so that we fully restock the  in ice_can_reuse_rx_page()
    852  if (unlikely(pagecnt_bias == 1)) {  in ice_can_reuse_rx_page()
    854  rx_buf->pagecnt_bias = USHRT_MAX;  in ice_can_reuse_rx_page()
    930  new_buf->pagecnt_bias = old_buf->pagecnt_bias;  in ice_reuse_rx_page()
    [all...]

ice_txrx_lib.h
    35  /* adjust pagecnt_bias on frags freed by XDP prog */  in ice_set_rx_bufs_act()
    45  buf->pagecnt_bias--;  in ice_set_rx_bufs_act()

ice_txrx.h
    205  unsigned int pagecnt_bias;  member
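
The 6.6 ice tree adds a multi-buffer XDP wrinkle: the ice_set_rx_bufs_act() hits in ice_txrx_lib.h (35/45) walk the buffers that make up one frame, record the XDP verdict on each, and drop one pagecnt_bias count per frag that the kernel's own comment describes as "freed by XDP prog". The following is only a rough sketch of that loop shape under those assumptions; the buffer type, action values, and the exact condition are placeholders, not the driver's.

#include <stddef.h>
#include <stdio.h>

/* Placeholder types: several rx buffers make up one multi-buffer XDP frame. */
struct fake_rx_buf { unsigned int pagecnt_bias; int act; };

enum { FAKE_XDP_PASS = 0, FAKE_XDP_CONSUMED = 1 };

/* Shape suggested by the ice_set_rx_bufs_act() hits: record the verdict on
 * every buffer of the frame and, per the kernel comment above, "adjust
 * pagecnt_bias on frags freed by XDP prog". */
static void set_rx_bufs_act(struct fake_rx_buf *bufs, size_t nr_frags, int act)
{
	for (size_t i = 0; i < nr_frags; i++) {
		bufs[i].act = act;
		if (act == FAKE_XDP_CONSUMED)
			bufs[i].pagecnt_bias--;   /* frag freed by the XDP program */
	}
}

int main(void)
{
	struct fake_rx_buf frame[3] = {
		{ .pagecnt_bias = 10 }, { .pagecnt_bias = 10 }, { .pagecnt_bias = 10 },
	};

	set_rx_bufs_act(frame, 3, FAKE_XDP_CONSUMED);
	printf("bias of frag 0 is now %u\n", frame[0].pagecnt_bias);   /* 9 */
	return 0;
}
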
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/i40e/

i40e_txrx.c
    1227  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in i40e_reuse_rx_page()
    1357  __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);  in i40e_clean_rx_ring()
    1540  bi->pagecnt_bias = USHRT_MAX;  in i40e_alloc_mapped_page()
    1872  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in i40e_can_reuse_rx_page() local
    1881  if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))  in i40e_can_reuse_rx_page()
    1891  * the pagecnt_bias and page count so that we fully restock the  in i40e_can_reuse_rx_page()
    1894  if (unlikely(pagecnt_bias == 1)) {  in i40e_can_reuse_rx_page()
    1896  rx_buffer->pagecnt_bias  in i40e_can_reuse_rx_page()
    [all...]

i40e_txrx.h
    279  __u16 pagecnt_bias;  member
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/i40e/

i40e_txrx.c
    1384  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in i40e_reuse_rx_page()
    1507  __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);  in i40e_clean_rx_ring()
    1672  bi->pagecnt_bias = USHRT_MAX;  in i40e_alloc_mapped_page()
    1973  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in i40e_can_reuse_rx_page() local
    1984  if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) {  in i40e_can_reuse_rx_page()
    1998  * the pagecnt_bias and page count so that we fully restock the  in i40e_can_reuse_rx_page()
    2001  if (unlikely(pagecnt_bias == 1)) {  in i40e_can_reuse_rx_page()
    2003  rx_buffer->pagecnt_bias  in i40e_can_reuse_rx_page()
    [all...]

i40e_txrx.h
    279  __u16 pagecnt_bias;  member
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ixgbevf/

ixgbevf_main.c
    543  rx_buffer->pagecnt_bias--;  in ixgbevf_get_rx_buffer()
    565  rx_buffer->pagecnt_bias);  in ixgbevf_put_rx_buffer()
    639  bi->pagecnt_bias = 1;  in ixgbevf_alloc_mapped_page()
    781  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in ixgbevf_reuse_rx_page()
    791  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in ixgbevf_can_reuse_rx_page() local
    800  if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))  in ixgbevf_can_reuse_rx_page()
    812  * the pagecnt_bias and page count so that we fully restock the  in ixgbevf_can_reuse_rx_page()
    815  if (unlikely(!pagecnt_bias)) {  in ixgbevf_can_reuse_rx_page()
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ixgbevf/

ixgbevf_main.c
    543  rx_buffer->pagecnt_bias--;  in ixgbevf_get_rx_buffer()
    565  rx_buffer->pagecnt_bias);  in ixgbevf_put_rx_buffer()
    639  bi->pagecnt_bias = 1;  in ixgbevf_alloc_mapped_page()
    781  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in ixgbevf_reuse_rx_page()
    786  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in ixgbevf_can_reuse_rx_page() local
    795  if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))  in ixgbevf_can_reuse_rx_page()
    807  * the pagecnt_bias and page count so that we fully restock the  in ixgbevf_can_reuse_rx_page()
    810  if (unlikely(!pagecnt_bias)) {  in ixgbevf_can_reuse_rx_page()
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/mediatek/

mtk_wed_wo.c
    307  __page_frag_cache_drain(page, q->cache.pagecnt_bias);  in mtk_wed_wo_queue_tx_clean()
    329  __page_frag_cache_drain(page, q->cache.pagecnt_bias);  in mtk_wed_wo_queue_rx_clean()
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/igc/

igc_main.c
    376   buffer_info->pagecnt_bias);  in igc_clean_rx_ring()
    1644  rx_buffer->pagecnt_bias--;  in igc_get_rx_buffer()
    1763  rx_buffer->pagecnt_bias++;  in igc_construct_skb()
    1795  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in igc_reuse_rx_page()
    1805  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in igc_can_reuse_rx_page() local
    1814  if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))  in igc_can_reuse_rx_page()
    1825  * the pagecnt_bias and page count so that we fully restock the  in igc_can_reuse_rx_page()
    1828  if (unlikely(!pagecnt_bias)) {  in igc_can_reuse_rx_page()
    [all...]
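
igc shows the other half of the bookkeeping: igc_get_rx_buffer() spends one bias count up front when it pulls a buffer for the current descriptor (hit 1644), and igc_construct_skb() hands it back with pagecnt_bias++ (hit 1763) when the payload is small enough to be copied into the skb head, so the page fragment is never actually attached to the skb (igb has the same give-back in igb_construct_skb(), further down). A schematic sketch of that pairing; names and the copybreak threshold are illustrative only.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins; not the driver's structures. */
struct fake_rx_buf { unsigned short pagecnt_bias; const char *data; };

#define FAKE_COPYBREAK 256   /* frames this small are copied, page stays unused */

/* get_rx_buffer(): commit one bias count, assuming the page fragment
 * will be attached to the skb. */
static void get_rx_buffer(struct fake_rx_buf *buf)
{
	buf->pagecnt_bias--;
}

/* construct_skb(): if the whole frame fits the copy path, the fragment
 * never leaves the driver, so the bias count is handed back (the
 * pagecnt_bias++ seen in igc_construct_skb()). */
static bool construct_skb(struct fake_rx_buf *buf, char *skb_head, size_t size)
{
	if (size <= FAKE_COPYBREAK) {
		memcpy(skb_head, buf->data, size);
		buf->pagecnt_bias++;        /* page was not consumed after all */
		return true;                /* frame lives entirely in skb head */
	}
	return false;                       /* real driver attaches the page frag */
}

int main(void)
{
	char head[FAKE_COPYBREAK];
	struct fake_rx_buf buf = { .pagecnt_bias = 100, .data = "tiny frame" };

	get_rx_buffer(&buf);
	construct_skb(&buf, head, strlen(buf.data) + 1);
	printf("bias back to %u\n", buf.pagecnt_bias);   /* prints 100 */
	return 0;
}
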
/kernel/linux/linux-5.10/include/linux/

mm_types.h
    268  unsigned int pagecnt_bias;  member
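
Outside the drivers' private rx_buf structs, the same field name lives in the core page-fragment allocator: struct page_frag_cache in mm_types.h carries pagecnt_bias (hit 268), and users such as mtk_wed_wo above tear a cache down with __page_frag_cache_drain(page, cache.pagecnt_bias), dropping all the references the bias still covers and freeing the page if that was the last of them. A self-contained toy model of that drain contract, with plain counters in place of struct page and the real allocator:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a page whose refcount was pre-charged by a frag cache. */
struct fake_page { unsigned int refcount; };

/* Drop `count` references at once and report whether the page would be
 * freed, which is how the clean-up paths above use
 * __page_frag_cache_drain(page, count). */
static bool frag_cache_drain(struct fake_page *page, unsigned int count)
{
	page->refcount -= count;
	if (page->refcount == 0) {
		printf("page freed\n");
		return true;
	}
	return false;
}

int main(void)
{
	/* A cache that pre-charged the refcount and has 3 bias counts left. */
	struct fake_page page = { .refcount = 3 };
	unsigned int pagecnt_bias = 3;

	frag_cache_drain(&page, pagecnt_bias);   /* releases the remaining bias */
	return 0;
}
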
/kernel/linux/linux-5.10/drivers/net/ethernet/hisilicon/hns3/

hns3_enet.c
    2383  cb->pagecnt_bias = USHRT_MAX;  in hns3_alloc_buffer()
    2393  else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)  in hns3_free_buffer()
    2394  __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);  in hns3_free_buffer()
    2697  return (page_count(cb->priv) - cb->pagecnt_bias) == 1;  in hns3_can_reuse_page()
    2708  desc_cb->pagecnt_bias--;  in hns3_nic_reuse_page()
    2717  __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);  in hns3_nic_reuse_page()
    2729  } else if (desc_cb->pagecnt_bias) {  in hns3_nic_reuse_page()
    2730  __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);  in hns3_nic_reuse_page()
    2734  if (unlikely(!desc_cb->pagecnt_bias)) {  in hns3_nic_reuse_page()
    2736  desc_cb->pagecnt_bias  in hns3_nic_reuse_page()
    [all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/igb/

igb_main.c
    4967  buffer_info->pagecnt_bias);  in igb_clean_rx_ring()
    8258  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in igb_reuse_rx_page()
    8269  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in igb_can_reuse_rx_page() local
    8278  if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))  in igb_can_reuse_rx_page()
    8289  * the pagecnt_bias and page count so that we fully restock the  in igb_can_reuse_rx_page()
    8292  if (unlikely(pagecnt_bias == 1)) {  in igb_can_reuse_rx_page()
    8294  rx_buffer->pagecnt_bias = USHRT_MAX;  in igb_can_reuse_rx_page()
    8380  rx_buffer->pagecnt_bias  in igb_construct_skb()
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/igb/

igb_main.c
    5053  buffer_info->pagecnt_bias);  in igb_clean_rx_ring()
    8435  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in igb_reuse_rx_page()
    8441  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in igb_can_reuse_rx_page() local
    8450  if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))  in igb_can_reuse_rx_page()
    8461  * the pagecnt_bias and page count so that we fully restock the  in igb_can_reuse_rx_page()
    8464  if (unlikely(pagecnt_bias == 1)) {  in igb_can_reuse_rx_page()
    8466  rx_buffer->pagecnt_bias = USHRT_MAX;  in igb_can_reuse_rx_page()
    8548  rx_buffer->pagecnt_bias  in igb_construct_skb()
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/igc/

igc_main.c
    435   buffer_info->pagecnt_bias);  in igc_clean_rx_ring_page_shared()
    1877  rx_buffer->pagecnt_bias--;  in igc_get_rx_buffer()
    2010  rx_buffer->pagecnt_bias++;  in igc_construct_skb()
    2042  new_buff->pagecnt_bias = old_buff->pagecnt_bias;  in igc_reuse_rx_page()
    2048  unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  in igc_can_reuse_rx_page() local
    2057  if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))  in igc_can_reuse_rx_page()
    2068  * the pagecnt_bias and page count so that we fully restock the  in igc_can_reuse_rx_page()
    2071  if (unlikely(pagecnt_bias  in igc_can_reuse_rx_page()
    [all...]