Lines Matching defs:efx

14 #include "efx.h"
36 struct efx_nic *efx = rx_queue->efx;
59 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
60 PAGE_SIZE << efx->rx_buffer_order,
77 struct efx_nic *efx = rx_queue->efx;
101 efx_unmap_rx_buffer(efx, rx_buf);
135 struct efx_nic *efx = rx_queue->efx;
137 bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
139 efx->rx_bufs_per_page);
150 struct efx_nic *efx = rx_queue->efx;
165 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
166 PAGE_SIZE << efx->rx_buffer_order,
183 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
191 struct efx_nic *efx = rx_queue->efx;
196 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
200 netif_dbg(efx, probe, efx->net_dev,
202 efx_rx_queue_index(rx_queue), efx->rxq_entries,
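
These matches appear to come from the sfc Ethernet driver's common RX path. Around 196-202, efx_probe_rx_queue normalises the requested descriptor count: it is rounded up to a power of two and held to a floor of EFX_MIN_DMAQ_SIZE, presumably so ring indices can wrap with a size-minus-one mask. A minimal userspace sketch of that normalisation; the minimum value and the requested depth below are placeholders, not the driver's constants:

    /* Sketch only: userspace model of max(roundup_pow_of_two(rxq_entries),
     * EFX_MIN_DMAQ_SIZE); the minimum below is a placeholder value. */
    #include <stdio.h>

    #define MIN_DMAQ_SIZE 512u      /* placeholder, not the driver's constant */

    static unsigned int roundup_pow_of_two_u(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int rxq_entries = 1000;    /* requested ring depth */
        unsigned int entries = roundup_pow_of_two_u(rxq_entries);

        if (entries < MIN_DMAQ_SIZE)
            entries = MIN_DMAQ_SIZE;
        /* mirrors the "creating RX queue ... size/mask" debug output */
        printf("size %#x mask %#x\n", entries, entries - 1);
        return 0;
    }
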
223 struct efx_nic *efx = rx_queue->efx;
226 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
244 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
246 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
260 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
264 netif_err(efx, rx_err, efx->net_dev,
267 efx->xdp_rxq_info_failed = true;
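
Around 244-246, efx_init_rx_queue derives the maximum fill level (ring entries minus the head room the hardware needs) and a fast-fill trigger at most one whole refill batch below it. A worked sketch of that arithmetic; every numeric value here is a placeholder:

    /* Sketch only: how max_fill and a fast-fill trigger might be derived.
     * All numbers are placeholders. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int rxq_entries = 1024;
        unsigned int rxd_head_room = 32;    /* placeholder for EFX_RXD_HEAD_ROOM */
        unsigned int pages_per_batch = 4, bufs_per_page = 2;

        /* never fill the ring completely: leave head room for the NIC */
        unsigned int max_fill = rxq_entries - rxd_head_room;
        /* refill can stop once less than one whole batch of space remains */
        unsigned int trigger = max_fill - pages_per_batch * bufs_per_page;

        printf("max_fill=%u trigger=%u (batch=%u buffers)\n",
               max_fill, trigger, pages_per_batch * bufs_per_page);
        return 0;
    }
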
281 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
309 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
321 void efx_unmap_rx_buffer(struct efx_nic *efx,
329 dma_unmap_page(&efx->pci_dev->dev,
331 PAGE_SIZE << efx->rx_buffer_order,
375 struct efx_nic *efx = rx_queue->efx;
387 efx->rx_buffer_order);
391 dma_map_page(&efx->pci_dev->dev, page, 0,
392 PAGE_SIZE << efx->rx_buffer_order,
394 if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
396 __free_pages(page, efx->rx_buffer_order);
412 rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
415 rx_buf->page_offset = page_offset + efx->rx_ip_align +
417 rx_buf->len = efx->rx_dma_len;
421 dma_addr += efx->rx_page_buf_step;
422 page_offset += efx->rx_page_buf_step;
423 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
426 } while (++count < efx->rx_pages_per_batch);
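
In efx_init_rx_buffers (375-426), each page is taken from the recycle ring or freshly allocated and DMA-mapped, then carved into several receive buffers: the inner loop at 412-423 steps both the DMA address and the page offset by rx_page_buf_step until another buffer would no longer fit in the page, offsetting each buffer by rx_ip_align (plus whatever extra headroom the unmatched continuation lines add), and the outer loop repeats for rx_pages_per_batch pages. A standalone sketch of the carving loop with placeholder geometry:

    /* Sketch only: carve one page into fixed-step RX buffers, padding each
     * buffer by an IP-alignment offset. All sizes are placeholders. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_size = 4096;
        unsigned int buf_step = 1856;   /* placeholder for efx->rx_page_buf_step */
        unsigned int ip_align = 2;      /* placeholder for efx->rx_ip_align */
        unsigned int dma_len = 1800;    /* placeholder for efx->rx_dma_len */
        unsigned int page_offset = 0;

        do {
            printf("buffer at offset %u, len %u\n",
                   page_offset + ip_align, dma_len);
            page_offset += buf_step;
        } while (page_offset + buf_step <= page_size);
        return 0;
    }
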
431 void efx_rx_config_page_split(struct efx_nic *efx)
433 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
436 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
438 efx->rx_page_buf_step);
439 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
440 efx->rx_bufs_per_page;
441 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
442 efx->rx_bufs_per_page);
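
efx_rx_config_page_split (431-442) is where that geometry comes from: the per-buffer step is the DMA length plus alignment padding rounded up to a buffer-alignment boundary, the buffers-per-page count drops to 1 when higher-order pages are in use, truesize is the whole page allocation divided evenly across its buffers, and the batch is sized to deliver roughly EFX_RX_PREFERRED_BATCH buffers. A worked example with made-up inputs (the real alignment constant and any per-page bookkeeping space are not visible in the matches, so those values are invented):

    /* Sketch only: the page-split arithmetic with placeholder inputs. */
    #include <stdio.h>

    #define ALIGN_UP(x, a)       (((x) + (a) - 1) / (a) * (a))
    #define DIV_ROUND_UP_U(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int page_size = 4096, buffer_order = 0;
        unsigned int rx_dma_len = 1800, rx_ip_align = 2;
        unsigned int buf_alignment = 64;    /* placeholder alignment */
        unsigned int page_state = 64;       /* placeholder per-page reserved space */
        unsigned int preferred_batch = 8;   /* placeholder for EFX_RX_PREFERRED_BATCH */

        unsigned int buf_step = ALIGN_UP(rx_dma_len + rx_ip_align, buf_alignment);
        unsigned int bufs_per_page = buffer_order ? 1 :
                (page_size - page_state) / buf_step;
        unsigned int truesize = (page_size << buffer_order) / bufs_per_page;
        unsigned int pages_per_batch = DIV_ROUND_UP_U(preferred_batch, bufs_per_page);

        printf("step=%u bufs/page=%u truesize=%u pages/batch=%u\n",
               buf_step, bufs_per_page, truesize, pages_per_batch);
        return 0;
    }
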
458 struct efx_nic *efx = rx_queue->efx;
467 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
477 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
481 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
496 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
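
efx_fast_push_rx_descriptors (458-496) only refills once the fill level has fallen below the fast-fill trigger, and then tops the ring up in whole batches of rx_pages_per_batch * rx_bufs_per_page buffers. A simplified sketch of that loop, assuming the fill level is tracked as buffers added minus buffers removed (the counters themselves are not visible in these matches); the numbers follow on from the earlier sketch:

    /* Sketch only: refill the ring in whole batches up to max_fill.
     * Counters and sizes are placeholders. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int max_fill = 992, trigger = 984;
        unsigned int added = 2000, removed = 1030;  /* placeholder counters */
        unsigned int batch_size = 4 * 2;            /* pages_per_batch * bufs_per_page */

        unsigned int fill_level = added - removed;
        if (fill_level >= trigger)
            return 0;           /* still full enough, nothing to do */

        unsigned int space = max_fill - fill_level;
        do {
            added += batch_size;    /* stand-in for allocating one batch of buffers */
            printf("pushed a batch, fill now %u\n", added - removed);
        } while ((space -= batch_size) >= batch_size);
        return 0;
    }
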
514 struct efx_nic *efx = channel->efx;
526 if (efx->net_dev->features & NETIF_F_RXHASH &&
527 efx_rx_buf_hash_valid(efx, eh))
528 skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
552 skb->truesize += n_frags * efx->rx_buffer_truesize;
562 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
564 struct list_head *head = &efx->rss_context.list;
568 WARN_ON(!mutex_is_locked(&efx->rss_lock));
595 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
597 struct list_head *head = &efx->rss_context.list;
600 WARN_ON(!mutex_is_locked(&efx->rss_lock));
614 void efx_set_default_rx_indir_table(struct efx_nic *efx,
621 ethtool_rxfh_indir_default(i, efx->rss_spread);
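
At 621 the default RSS indirection table is populated with ethtool_rxfh_indir_default(i, efx->rss_spread), which amounts to spreading table entries round-robin over the available RX queues. A tiny sketch with placeholder table size and spread:

    /* Sketch only: a default indirection table assigns entries round-robin
     * over rss_spread queues. Table size and spread are placeholders. */
    #include <stdio.h>

    #define TABLE_SIZE 128u     /* placeholder indirection table size */

    int main(void)
    {
        unsigned int table[TABLE_SIZE];
        unsigned int rss_spread = 8;    /* placeholder number of RX queues */

        for (unsigned int i = 0; i < TABLE_SIZE; i++)
            table[i] = i % rss_spread;  /* like ethtool_rxfh_indir_default(i, spread) */

        printf("entry 0 -> queue %u, entry 9 -> queue %u\n", table[0], table[9]);
        return 0;
    }
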
708 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
713 lockdep_assert_held(&efx->rps_hash_lock);
714 if (!efx->rps_hash_table)
716 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
719 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
726 head = efx_rps_hash_bucket(efx, spec);
737 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
745 head = efx_rps_hash_bucket(efx, spec);
764 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
770 head = efx_rps_hash_bucket(efx, spec);
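
The efx_rps_hash_* helpers (708-770) all go through efx_rps_hash_bucket, which at 716 indexes a fixed-size array of list heads with the filter-spec hash modulo EFX_ARFS_HASH_TABLE_SIZE, under rps_hash_lock. A sketch of that bucketing; the hash function, the spec fields and the table size below are placeholders:

    /* Sketch only: pick a bucket for a flow by hashing its 4-tuple and
     * reducing modulo a fixed table size. Hash and sizes are invented. */
    #include <stdint.h>
    #include <stdio.h>

    #define ARFS_TABLE_SIZE 256u    /* placeholder for EFX_ARFS_HASH_TABLE_SIZE */

    struct flow_spec {              /* stand-in for struct efx_filter_spec */
        uint32_t saddr, daddr;
        uint16_t sport, dport;
    };

    static uint32_t flow_hash(const struct flow_spec *spec)
    {
        /* placeholder hash; the driver computes its own spec hash */
        return spec->saddr ^ spec->daddr ^
               ((uint32_t)spec->sport << 16 | spec->dport);
    }

    int main(void)
    {
        struct flow_spec spec = { 0x0a000001, 0x0a000002, 12345, 80 };
        unsigned int bucket = flow_hash(&spec) % ARFS_TABLE_SIZE;

        printf("flow hashes to bucket %u of %u\n", bucket, ARFS_TABLE_SIZE);
        return 0;
    }
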
794 int efx_probe_filters(struct efx_nic *efx)
798 mutex_lock(&efx->mac_lock);
799 rc = efx->type->filter_table_probe(efx);
804 if (efx->type->offload_features & NETIF_F_NTUPLE) {
808 efx_for_each_channel(channel, efx) {
810 kcalloc(efx->type->max_rx_ip_filters,
817 i < efx->type->max_rx_ip_filters;
826 efx_for_each_channel(channel, efx) {
830 efx->type->filter_table_remove(efx);
837 mutex_unlock(&efx->mac_lock);
841 void efx_remove_filters(struct efx_nic *efx)
846 efx_for_each_channel(channel, efx) {
852 efx->type->filter_table_remove(efx);
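
efx_probe_filters (794-837) allocates, when NTUPLE offload is present, one rps_flow_id array per channel sized by max_rx_ip_filters (the loop at 817 suggests each entry is then set to an invalid-flow marker), and on any allocation failure it frees every channel's array and removes the filter table before returning; efx_remove_filters (841-852) performs the same teardown unconditionally. A sketch of that all-or-nothing allocation pattern with placeholder sizes and sentinel:

    /* Sketch only: per-channel flow-id arrays with rollback on failure. */
    #include <stdio.h>
    #include <stdlib.h>

    #define N_CHANNELS        4             /* placeholder */
    #define MAX_RX_IP_FILTERS 1024          /* placeholder */
    #define FLOW_ID_INVALID   0xffffffffu   /* placeholder sentinel */

    struct channel {
        unsigned int *rps_flow_id;
    };

    int main(void)
    {
        struct channel channels[N_CHANNELS] = { { NULL } };
        int success = 1;

        for (int c = 0; c < N_CHANNELS; c++) {
            channels[c].rps_flow_id =
                    calloc(MAX_RX_IP_FILTERS, sizeof(unsigned int));
            if (!channels[c].rps_flow_id) {
                success = 0;
            } else {
                for (int i = 0; i < MAX_RX_IP_FILTERS; i++)
                    channels[c].rps_flow_id[i] = FLOW_ID_INVALID;
            }
        }

        if (!success) {
            /* roll back: free everything, as the probe error path does
             * before removing the filter table again */
            for (int c = 0; c < N_CHANNELS; c++)
                free(channels[c].rps_flow_id);
            return 1;
        }

        printf("allocated %d filter-id arrays\n", N_CHANNELS);
        for (int c = 0; c < N_CHANNELS; c++)
            free(channels[c].rps_flow_id);
        return 0;
    }
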
861 struct efx_nic *efx = efx_netdev_priv(req->net_dev);
862 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
863 int slot_idx = req - efx->rps_slot;
868 rc = efx->type->filter_insert(efx, &req->spec, true);
871 rc %= efx->type->max_rx_ip_filters;
872 if (efx->rps_hash_table) {
873 spin_lock_bh(&efx->rps_hash_lock);
874 rule = efx_rps_hash_find(efx, &req->spec);
888 spin_unlock_bh(&efx->rps_hash_lock);
894 mutex_lock(&efx->rps_mutex);
898 mutex_unlock(&efx->rps_mutex);
901 netif_info(efx, rx_status, efx->net_dev,
908 netif_info(efx, rx_status, efx->net_dev,
917 netif_dbg(efx, rx_status, efx->net_dev,
924 netif_dbg(efx, rx_status, efx->net_dev,
939 clear_bit(slot_idx, &efx->rps_slot_map);
946 struct efx_nic *efx = efx_netdev_priv(net_dev);
956 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
980 req = efx->rps_slot + slot_idx;
982 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
1004 if (efx->rps_hash_table) {
1006 spin_lock(&efx->rps_hash_lock);
1007 rule = efx_rps_hash_add(efx, &req->spec, &new);
1013 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
1021 spin_unlock(&efx->rps_hash_lock);
1039 spin_unlock(&efx->rps_hash_lock);
1041 clear_bit(slot_idx, &efx->rps_slot_map);
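
efx_filter_rfs (946-1041) keeps a small fixed pool of in-flight ARFS requests: a slot is claimed with test_and_set_bit on rps_slot_map before the work item is queued, and the bit is cleared again when the request completes or is abandoned; the new rule's arfs_id comes from rps_next_id modulo RPS_NO_FILTER. A userspace sketch of the same claim/release idea using C11 atomics; the pool size is a placeholder:

    /* Sketch only: atomically claim a request slot from a small bitmap and
     * release it when done. The slot count is a placeholder. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_IN_FLIGHT 8u    /* placeholder pool size */

    static atomic_ulong slot_map;

    static int claim_slot(void)
    {
        for (unsigned int i = 0; i < MAX_IN_FLIGHT; i++) {
            unsigned long bit = 1ul << i;

            /* like test_and_set_bit(): set the bit, check its old value */
            if (!(atomic_fetch_or(&slot_map, bit) & bit))
                return (int)i;
        }
        return -1;  /* all slots busy: the driver path returns -EBUSY here */
    }

    static void release_slot(int i)
    {
        atomic_fetch_and(&slot_map, ~(1ul << i));   /* like clear_bit() */
    }

    int main(void)
    {
        int slot = claim_slot();

        if (slot < 0)
            return 1;
        printf("claimed slot %d\n", slot);
        release_slot(slot);
        return 0;
    }
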
1047 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
1048 struct efx_nic *efx = channel->efx;
1052 if (!mutex_trylock(&efx->rps_mutex))
1054 expire_one = efx->type->filter_rfs_expire_one;
1057 size = efx->type->max_rx_ip_filters;
1063 if (expire_one(efx, flow_id, index)) {
1064 netif_info(efx, rx_status, efx->net_dev,
1083 mutex_unlock(&efx->rps_mutex);
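
The expiry path (1047-1083) takes rps_mutex only if it is uncontended, then walks the per-channel flow-id table calling the NIC type's filter_rfs_expire_one hook and dropping whichever filters it reports as idle. A sketch of a quota-bounded circular scan of that shape; the table contents, quota and expiry test are all invented:

    /* Sketch only: quota-bounded circular scan over a filter table, invoking
     * an expire callback per live entry. All data here is made up. */
    #include <stdbool.h>
    #include <stdio.h>

    #define TABLE_SIZE      16u             /* placeholder for max_rx_ip_filters */
    #define FLOW_ID_INVALID 0xffffffffu     /* placeholder sentinel */

    static bool expire_one(unsigned int flow_id, unsigned int index)
    {
        /* placeholder policy: pretend even flow ids have gone idle */
        (void)index;
        return (flow_id % 2) == 0;
    }

    int main(void)
    {
        unsigned int flow_id[TABLE_SIZE];
        unsigned int index = 0, start, quota = 4;

        for (unsigned int i = 0; i < TABLE_SIZE; i++)
            flow_id[i] = (i % 3) ? i + 100 : FLOW_ID_INVALID;

        start = index;
        while (quota) {
            if (flow_id[index] != FLOW_ID_INVALID) {
                quota--;
                if (expire_one(flow_id[index], index)) {
                    printf("expired filter %u (flow %u)\n",
                           index, flow_id[index]);
                    flow_id[index] = FLOW_ID_INVALID;
                }
            }
            if (++index == TABLE_SIZE)
                index = 0;
            if (index == start)
                break;      /* scanned the whole table once */
        }
        return 0;
    }
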