Lines matching refs:efx (identifier cross-reference listing). Each entry below gives the source line number followed by the matching line; the fragments appear to come from the Linux sfc driver's channel and interrupt setup code (efx_channels.c).

15 #include "efx.h"
55 static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
62 netif_warn(efx, probe, efx->net_dev,
70 cpumask_of_pcibus(efx->pci_dev->bus));
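
The count_online_cores() fragments above (lines 55-70) count physical cores rather than logical CPUs, optionally restricted to the NIC's NUMA node via cpumask_of_pcibus(). A minimal userspace sketch of the idea, with a fabricated cpu-to-core table standing in for the kernel's topology sibling masks:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    /* core_id[cpu]: two hyperthreads share each physical core
     * (hypothetical topology for illustration). */
    int core_id[] = { 0, 0, 1, 1, 2, 2, 3, 3 };
    bool seen[4] = { false };
    int cores = 0;

    for (int cpu = 0; cpu < 8; cpu++) {
        if (!seen[core_id[cpu]]) {
            seen[core_id[cpu]] = true;
            cores++;
        }
    }
    printf("online cores: %d\n", cores);    /* 4, not 8 */
    return 0;
}

Counting each core once keeps hyperthread siblings from inflating the RSS spread.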
83 static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
90 count = count_online_cores(efx, true);
94 count = count_online_cores(efx, false);
98 netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
108 if (efx->type->sriov_wanted) {
109 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
110 count > efx_vf_size(efx)) {
111 netif_warn(efx, probe, efx->net_dev,
115 count, efx_vf_size(efx));
116 count = efx_vf_size(efx);
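
efx_wanted_parallelism() (lines 83-116) turns that core count into the desired RSS channel count: prefer cores local to the NIC, fall back to all online cores, and clamp to the SR-IOV VF size so the PF does not spread RSS wider than its VFs can follow. A compilable sketch of that decision flow, with stub inputs in place of the rss_cpus module parameter and the NIC-type callbacks (every name here is hypothetical; only the clamp logic mirrors the fragments):

#include <stdio.h>

/* Stand-ins for driver state. */
struct nic {
    int sriov_wanted;          /* efx->type->sriov_wanted(efx) */
    unsigned int vf_size;      /* efx_vf_size(efx) */
};

static unsigned int wanted_parallelism(const struct nic *nic,
                                       unsigned int local_cores,
                                       unsigned int all_cores)
{
    /* Prefer cores on the NIC's NUMA node; fall back to every core. */
    unsigned int count = local_cores ? local_cores : all_cores;

    /* With SR-IOV active, spreading RSS beyond the VF size would make
     * the PF and VFs disagree on queue mapping, so clamp to vf_size. */
    if (nic->sriov_wanted && nic->vf_size > 1 && count > nic->vf_size)
        count = nic->vf_size;
    return count;
}

int main(void)
{
    struct nic nic = { .sriov_wanted = 1, .vf_size = 4 };
    printf("parallelism = %u\n", wanted_parallelism(&nic, 16, 32)); /* -> 4 */
    return 0;
}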
124 static int efx_allocate_msix_channels(struct efx_nic *efx,
144 tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
149 vec_count = pci_msix_vec_count(efx->pci_dev);
160 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
161 netif_warn(efx, drv, efx->net_dev,
164 netif_warn(efx, drv, efx->net_dev,
166 } else if (n_channels + n_xdp_tx > efx->max_vis) {
167 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
168 netif_warn(efx, drv, efx->net_dev,
170 n_xdp_tx, n_channels, efx->max_vis);
171 netif_warn(efx, drv, efx->net_dev,
174 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
175 netif_warn(efx, drv, efx->net_dev,
180 netif_warn(efx, drv, efx->net_dev,
184 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
187 if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
188 efx->n_xdp_channels = n_xdp_ev;
189 efx->xdp_tx_per_channel = tx_per_ev;
190 efx->xdp_tx_queue_count = n_xdp_tx;
192 netif_dbg(efx, drv, efx->net_dev,
196 efx->n_xdp_channels = 0;
197 efx->xdp_tx_per_channel = 0;
198 efx->xdp_tx_queue_count = n_xdp_tx;
202 netif_err(efx, drv, efx->net_dev,
205 netif_err(efx, drv, efx->net_dev,
212 efx->n_channels = n_channels;
215 n_channels -= efx->n_xdp_channels;
218 efx->n_tx_channels =
220 efx->max_tx_channels);
221 efx->tx_channel_offset =
222 n_channels - efx->n_tx_channels;
223 efx->n_rx_channels =
225 efx->n_tx_channels, 1U);
227 efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
228 efx->tx_channel_offset = 0;
229 efx->n_rx_channels = n_channels;
232 efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
233 efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
235 efx->xdp_channel_offset = n_channels;
237 netif_dbg(efx, drv, efx->net_dev,
239 efx->n_rx_channels);
241 return efx->n_channels;
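
efx_allocate_msix_channels() (lines 124-241) budgets MSI-X vectors between normal channels and XDP TX event queues, degrading from dedicated XDP queues, to XDP queues stacked on shared event queues, to borrowing the normal TX queues when vectors or virtual interfaces run out. A compilable sketch of that fallback ladder, with the hardware limits passed in as parameters rather than read from the device (names other than the three mode values are hypothetical):

#include <stdio.h>

enum xdp_mode { XDP_DEDICATED, XDP_SHARED, XDP_BORROWED };

/* How many TX queues fit on one event queue: the EVQ must have room
 * for the completions of every queue it serves. */
static unsigned int tx_per_ev(unsigned int max_evq_size,
                              unsigned int txq_max_entries)
{
    return max_evq_size / txq_max_entries;
}

static enum xdp_mode pick_xdp_mode(unsigned int n_channels,
                                   unsigned int n_xdp_tx,
                                   unsigned int per_ev,
                                   unsigned int vec_count,
                                   unsigned int max_vis,
                                   unsigned int max_channels)
{
    /* DIV_ROUND_UP(n_xdp_tx, per_ev) */
    unsigned int n_xdp_ev = (n_xdp_tx + per_ev - 1) / per_ev;

    if (vec_count < n_channels + n_xdp_ev)
        return XDP_BORROWED;        /* not enough MSI-X vectors */
    if (n_channels + n_xdp_tx > max_vis)
        return XDP_BORROWED;        /* not enough virtual interfaces */
    if (n_channels + n_xdp_ev > max_channels)
        return XDP_SHARED;          /* stack XDP queues on fewer EVQs */
    return XDP_DEDICATED;
}

int main(void)
{
    unsigned int per_ev = tx_per_ev(16384, 2048);      /* 8 TXQs per EVQ */
    printf("mode = %d\n",
           pick_xdp_mode(8, 16, per_ev, 16, 128, 32)); /* XDP_DEDICATED */
    return 0;
}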
247 int efx_probe_interrupts(struct efx_nic *efx)
255 if (efx->extra_channel_type[i])
258 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
259 unsigned int parallelism = efx_wanted_parallelism(efx);
263 rc = efx_allocate_msix_channels(efx, efx->max_channels,
269 rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
274 netif_err(efx, drv, efx->net_dev,
276 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
277 efx->interrupt_mode = EFX_INT_MODE_MSI;
281 netif_err(efx, drv, efx->net_dev,
284 netif_err(efx, drv, efx->net_dev,
290 for (i = 0; i < efx->n_channels; i++)
291 efx_get_channel(efx, i)->irq =
297 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
298 efx->n_channels = 1;
299 efx->n_rx_channels = 1;
300 efx->n_tx_channels = 1;
301 efx->tx_channel_offset = 0;
302 efx->n_xdp_channels = 0;
303 efx->xdp_channel_offset = efx->n_channels;
304 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
305 rc = pci_enable_msi(efx->pci_dev);
307 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
309 netif_err(efx, drv, efx->net_dev,
311 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
312 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
319 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
320 efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
321 efx->n_rx_channels = 1;
322 efx->n_tx_channels = 1;
323 efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
324 efx->n_xdp_channels = 0;
325 efx->xdp_channel_offset = efx->n_channels;
326 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
327 efx->legacy_irq = efx->pci_dev->irq;
331 efx->n_extra_tx_channels = 0;
332 j = efx->xdp_channel_offset;
334 if (!efx->extra_channel_type[i])
336 if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
337 efx->extra_channel_type[i]->handle_no_channel(efx);
340 efx_get_channel(efx, j)->type =
341 efx->extra_channel_type[i];
342 if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
343 efx->n_extra_tx_channels++;
347 rss_spread = efx->n_rx_channels;
350 if (efx->type->sriov_wanted) {
351 efx->rss_spread = ((rss_spread > 1 ||
352 !efx->type->sriov_wanted(efx)) ?
353 rss_spread : efx_vf_size(efx));
357 efx->rss_spread = rss_spread;
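
efx_probe_interrupts() (lines 247-357) tries interrupt modes in order of capability: MSI-X first, then MSI, then a legacy line interrupt, taking each fallback only if the NIC type's min_interrupt_mode still permits it. A sketch of that ladder, with stub probe helpers standing in for pci_enable_msix_range() and pci_enable_msi() (the helpers and their outcomes are invented for the example):

#include <stdio.h>

/* Mode ordering mirrors the fragments: larger value = less capable. */
enum int_mode { INT_MSIX = 0, INT_MSI = 1, INT_LEGACY = 2 };

/* Hypothetical probe helpers; each returns 0 on success. */
static int try_msix(void) { return -1; }   /* pretend MSI-X failed */
static int try_msi(void)  { return 0; }    /* pretend MSI worked   */

/* min_supported plays the role of efx->type->min_interrupt_mode:
 * a fallback is taken only if the NIC type supports that mode. */
static enum int_mode probe_interrupts(enum int_mode min_supported)
{
    enum int_mode mode = INT_MSIX;

    if (mode == INT_MSIX && try_msix() < 0 && min_supported >= INT_MSI)
        mode = INT_MSI;
    if (mode == INT_MSI && try_msi() < 0 && min_supported >= INT_LEGACY)
        mode = INT_LEGACY;
    return mode;
}

int main(void)
{
    printf("mode = %d\n", probe_interrupts(INT_LEGACY)); /* -> INT_MSI */
    return 0;
}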
363 void efx_set_interrupt_affinity(struct efx_nic *efx)
365 const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
374 efx_for_each_channel(channel, efx) {
382 void efx_clear_interrupt_affinity(struct efx_nic *efx)
386 efx_for_each_channel(channel, efx)
391 efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
396 efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
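
efx_set_interrupt_affinity() (lines 363-374) spreads channel IRQs across the CPUs on the NIC's NUMA node, and the stubs at lines 391-396 compile the feature away when affinity hints are unavailable. A sketch of the round-robin spread, with an explicit CPU list standing in for cpumask_of_pcibus() (the list and helper names are hypothetical):

#include <stdio.h>

/* Round-robin channel IRQs across the CPUs local to the NIC's node. */
static void spread_irqs(const int *local_cpus, int n_local, int n_channels)
{
    for (int ch = 0; ch < n_channels; ch++) {
        int cpu = local_cpus[ch % n_local];
        /* The driver would call irq_set_affinity_hint() here. */
        printf("channel %d -> CPU %d\n", ch, cpu);
    }
}

int main(void)
{
    int local_cpus[] = { 0, 2, 4, 6 };   /* CPUs on the NIC's node */
    spread_irqs(local_cpus, 4, 6);       /* 6 channels wrap onto CPUs 0, 2 */
    return 0;
}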
401 void efx_remove_interrupts(struct efx_nic *efx)
406 efx_for_each_channel(channel, efx)
408 pci_disable_msi(efx->pci_dev);
409 pci_disable_msix(efx->pci_dev);
412 efx->legacy_irq = 0;
426 struct efx_nic *efx = channel->efx;
429 netif_dbg(efx, probe, efx->net_dev,
435 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
445 struct efx_nic *efx = channel->efx;
450 netif_dbg(efx, drv, efx->net_dev,
455 efx->type->push_irq_moderation(channel);
465 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
491 netif_dbg(channel->efx, drv, channel->efx->net_dev,
500 netif_dbg(channel->efx, drv, channel->efx->net_dev,
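
The event-queue probe fragment above (line 435) sizes the EVQ as roundup_pow_of_two(rxq_entries + txq_entries + 128): one event slot per RX and TX descriptor, plus slack for non-DMA events, rounded up because hardware rings are power-of-two sized. A worked sketch of the computation, with a naive local rounding helper in place of the kernel's roundup_pow_of_two():

#include <stdio.h>

/* Round up to the next power of two (naive loop for illustration). */
static unsigned int roundup_pow2(unsigned int v)
{
    unsigned int p = 1;
    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned int rxq = 1024, txq = 1024;

    /* 1024 + 1024 + 128 = 2176 -> rounds up to 4096 entries. */
    printf("evq entries = %u\n", roundup_pow2(rxq + txq + 128));
    return 0;
}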
530 static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
541 channel->efx = efx;
547 tx_queue->efx = efx;
558 rx_queue->efx = efx;
564 int efx_init_channels(struct efx_nic *efx)
569 efx->channel[i] = efx_alloc_channel(efx, i);
570 if (!efx->channel[i])
572 efx->msi_context[i].efx = efx;
573 efx->msi_context[i].index = i;
577 efx->interrupt_mode = min(efx->type->min_interrupt_mode,
580 efx->max_channels = EFX_MAX_CHANNELS;
581 efx->max_tx_channels = EFX_MAX_CHANNELS;
586 void efx_fini_channels(struct efx_nic *efx)
591 if (efx->channel[i]) {
592 kfree(efx->channel[i]);
593 efx->channel[i] = NULL;
645 netif_dbg(channel->efx, probe, channel->efx->net_dev,
680 struct efx_nic *efx = channel->efx;
686 if (number >= efx->xdp_channel_offset &&
687 !WARN_ON_ONCE(!efx->n_xdp_channels)) {
689 number -= efx->xdp_channel_offset;
690 } else if (efx->tx_channel_offset == 0) {
692 } else if (number < efx->tx_channel_offset) {
696 number -= efx->tx_channel_offset;
698 snprintf(buf, len, "%s%s-%d", efx->name, type, number);
701 void efx_set_channel_names(struct efx_nic *efx)
705 efx_for_each_channel(channel, efx)
707 efx->msi_context[channel->channel].name,
708 sizeof(efx->msi_context[0].name));
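
The naming fragments (lines 680-708) show efx_get_channel_name() rebasing each channel number within its class so interrupts read as eth0-rx-0..N, eth0-tx-0..N, eth0-xdp-tx-0..N, or plain eth0-0..N when channels combine RX and TX. A self-contained sketch of that scheme, with the offsets passed in as example values:

#include <stdio.h>

static void channel_name(char *buf, size_t len, const char *ifname,
                         int number, int tx_offset, int xdp_offset,
                         int n_xdp_channels)
{
    const char *type;

    if (number >= xdp_offset && n_xdp_channels) {
        type = "-xdp-tx";
        number -= xdp_offset;
    } else if (tx_offset == 0) {
        type = "";               /* combined RX/TX channels */
    } else if (number < tx_offset) {
        type = "-rx";
    } else {
        type = "-tx";
        number -= tx_offset;
    }
    snprintf(buf, len, "%s%s-%d", ifname, type, number);
}

int main(void)
{
    char buf[32];
    channel_name(buf, sizeof(buf), "eth0", 5, 4, 6, 2);
    printf("%s\n", buf);    /* channel 5, tx_offset 4 -> "eth0-tx-1" */
    return 0;
}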
711 int efx_probe_channels(struct efx_nic *efx)
721 efx_for_each_channel_rev(channel, efx) {
724 netif_err(efx, probe, efx->net_dev,
730 efx_set_channel_names(efx);
735 efx_remove_channels(efx);
744 netif_dbg(channel->efx, drv, channel->efx->net_dev,
755 void efx_remove_channels(struct efx_nic *efx)
759 efx_for_each_channel(channel, efx)
762 kfree(efx->xdp_tx_queues);
765 static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
768 if (xdp_queue_number >= efx->xdp_tx_queue_count)
771 netif_dbg(efx, drv, efx->net_dev,
775 efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
779 static void efx_set_xdp_channels(struct efx_nic *efx)
791 efx_for_each_channel(channel, efx) {
792 if (channel->channel < efx->tx_channel_offset)
798 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
806 netif_dbg(efx, drv, efx->net_dev,
817 if (efx->xdp_txq_queues_mode ==
820 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
827 WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
828 xdp_queue_number != efx->xdp_tx_queue_count);
829 WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
830 xdp_queue_number > efx->xdp_tx_queue_count);
836 while (xdp_queue_number < efx->xdp_tx_queue_count) {
837 tx_queue = efx->xdp_tx_queues[next_queue++];
838 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
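
efx_set_xdp_channels() (lines 779-838) gives each hardware XDP TX queue a CPU-indexed slot in efx->xdp_tx_queues, then, in the shared fallback mode, wraps the remaining slots back onto the queues that do exist so every CPU still has a mapping. A sketch of that assignment with plain ints standing in for efx_tx_queue pointers (counts are invented for the example):

#include <stdio.h>

#define N_XDP_SLOTS 6           /* one slot per possible CPU */

int main(void)
{
    int map[N_XDP_SLOTS];
    int n_hw_queues = 4;        /* dedicated XDP queues actually allocated */
    int slot = 0;

    /* First pass: one slot per real queue. */
    for (int q = 0; q < n_hw_queues && slot < N_XDP_SLOTS; q++)
        map[slot++] = q;

    /* Shared mode: more slots than queues, reuse queues round-robin. */
    for (int q = 0; slot < N_XDP_SLOTS; q = (q + 1) % n_hw_queues)
        map[slot++] = q;

    for (int i = 0; i < N_XDP_SLOTS; i++)
        printf("xdp slot %d -> hw queue %d\n", i, map[i]);
    return 0;
}

The WARN_ON checks at lines 827-830 assert the converse invariants: dedicated mode must fill every slot exactly, and the other modes must never overrun the queue count.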
844 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
847 *ptp_channel = efx_ptp_channel(efx);
848 struct efx_ptp_data *ptp_data = efx->ptp_data;
853 rc = efx_check_disabled(efx);
857 efx_device_detach_sync(efx);
858 efx_stop_all(efx);
859 efx_soft_disable_interrupts(efx);
863 for (i = 0; i < efx->n_channels; i++) {
864 channel = efx->channel[i];
875 old_rxq_entries = efx->rxq_entries;
876 old_txq_entries = efx->txq_entries;
877 efx->rxq_entries = rxq_entries;
878 efx->txq_entries = txq_entries;
879 for (i = 0; i < efx->n_channels; i++)
880 swap(efx->channel[i], other_channel[i]);
882 for (i = 0; i < efx->n_channels; i++) {
883 channel = efx->channel[i];
889 efx_init_napi_channel(efx->channel[i]);
892 efx_set_xdp_channels(efx);
894 efx->ptp_data = NULL;
896 for (i = 0; i < efx->n_channels; i++) {
905 efx->ptp_data = ptp_data;
906 rc2 = efx_soft_enable_interrupts(efx);
909 netif_err(efx, drv, efx->net_dev,
911 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
913 efx_start_all(efx);
914 efx_device_attach_if_not_resetting(efx);
920 efx->rxq_entries = old_rxq_entries;
921 efx->txq_entries = old_txq_entries;
922 for (i = 0; i < efx->n_channels; i++)
923 swap(efx->channel[i], other_channel[i]);
924 efx_ptp_update_channel(efx, ptp_channel);
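
efx_realloc_channels() (lines 844-924) follows a swap-and-rollback pattern: stop the device, build replacement channels with the new ring sizes on the side, swap them in, and if initialisation fails swap the old channels and ring sizes straight back so the device keeps its previous working state. A minimal sketch of that shape, reduced to a single ring structure (the failure condition is contrived for the demo):

#include <stdio.h>

struct ring { unsigned int rxq, txq; };

static int init_ring(const struct ring *r)
{
    /* Pretend initialisation fails for oversized rings. */
    return (r->rxq > 4096 || r->txq > 4096) ? -1 : 0;
}

static int realloc_rings(struct ring *live, unsigned int rxq,
                         unsigned int txq)
{
    struct ring old = *live;    /* keep the known-good state aside */

    live->rxq = rxq;
    live->txq = txq;
    if (init_ring(live)) {
        *live = old;            /* rollback: restore old sizes */
        init_ring(live);        /* re-init with known-good state */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct ring r = { 1024, 1024 };
    printf("grow rc: %d\n", realloc_rings(&r, 8192, 1024)); /* fails */
    printf("rxq still %u\n", r.rxq);                        /* 1024 */
    return 0;
}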
928 int efx_set_channels(struct efx_nic *efx)
933 if (efx->xdp_tx_queue_count) {
934 EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
937 efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
938 sizeof(*efx->xdp_tx_queues),
940 if (!efx->xdp_tx_queues)
944 efx_for_each_channel(channel, efx) {
945 if (channel->channel < efx->n_rx_channels)
951 efx_set_xdp_channels(efx);
953 rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
956 return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
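
efx_set_channels() (lines 928-956) does the final bookkeeping: allocate the XDP TX queue lookup table with kcalloc(), mark which channels carry RX, and tell the network stack the real queue counts via netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues(). A userspace sketch of the first two steps (the channel counts are invented):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned int n_channels = 8, n_rx_channels = 6, xdp_count = 4;

    /* Mirrors the kcalloc() of efx->xdp_tx_queues at lines 937-940. */
    int **xdp_tx_queues = calloc(xdp_count, sizeof(*xdp_tx_queues));
    if (!xdp_tx_queues)
        return 1;   /* the driver returns -ENOMEM here */

    /* Channels below n_rx_channels handle RX; the rest are TX-only. */
    for (unsigned int ch = 0; ch < n_channels; ch++)
        printf("channel %u: rx=%s\n", ch,
               ch < n_rx_channels ? "yes" : "no");

    free(xdp_tx_queues);
    return 0;
}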
961 return channel->channel - channel->efx->tx_channel_offset <
962 channel->efx->n_tx_channels;
969 int efx_soft_enable_interrupts(struct efx_nic *efx)
974 BUG_ON(efx->state == STATE_DISABLED);
976 efx->irq_soft_enabled = true;
979 efx_for_each_channel(channel, efx) {
988 efx_mcdi_mode_event(efx);
993 efx_for_each_channel(channel, efx) {
1004 void efx_soft_disable_interrupts(struct efx_nic *efx)
1008 if (efx->state == STATE_DISABLED)
1011 efx_mcdi_mode_poll(efx);
1013 efx->irq_soft_enabled = false;
1016 if (efx->legacy_irq)
1017 synchronize_irq(efx->legacy_irq);
1019 efx_for_each_channel(channel, efx) {
1029 efx_mcdi_flush_async(efx);
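
efx_soft_enable_interrupts()/efx_soft_disable_interrupts() (lines 969-1029) gate event processing on the irq_soft_enabled flag: the hard IRQ handler checks it before scheduling NAPI, and the disable path clears it and then calls synchronize_irq() to wait out any in-flight handler before touching channel state. A userspace sketch of the flag-and-barrier pattern using C11 atomics (the handler is a stand-in, not the driver's ISR):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool irq_soft_enabled;

static void irq_handler(int channel)
{
    if (!atomic_load(&irq_soft_enabled))
        return;                 /* device quiescing: drop the event */
    printf("channel %d: schedule NAPI poll\n", channel);
}

int main(void)
{
    atomic_store(&irq_soft_enabled, true);
    irq_handler(0);             /* processed */

    atomic_store(&irq_soft_enabled, false);
    /* kernel: synchronize_irq() here waits for running handlers */
    irq_handler(0);             /* silently ignored */
    return 0;
}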
1032 int efx_enable_interrupts(struct efx_nic *efx)
1038 BUG_ON(efx->state == STATE_DISABLED);
1040 if (efx->eeh_disabled_legacy_irq) {
1041 enable_irq(efx->legacy_irq);
1042 efx->eeh_disabled_legacy_irq = false;
1045 efx->type->irq_enable_master(efx);
1047 efx_for_each_channel(channel, efx) {
1055 rc = efx_soft_enable_interrupts(efx);
1063 efx_for_each_channel(channel, efx) {
1070 efx->type->irq_disable_non_ev(efx);
1075 void efx_disable_interrupts(struct efx_nic *efx)
1079 efx_soft_disable_interrupts(efx);
1081 efx_for_each_channel(channel, efx) {
1086 efx->type->irq_disable_non_ev(efx);
1089 void efx_start_channels(struct efx_nic *efx)
1095 efx_for_each_channel_rev(channel, efx) {
1100 atomic_inc(&efx->active_queues);
1105 atomic_inc(&efx->active_queues);
1115 void efx_stop_channels(struct efx_nic *efx)
1126 efx_for_each_channel(channel, efx) {
1133 efx_for_each_channel(channel, efx) {
1146 if (efx->type->fini_dmaq)
1147 rc = efx->type->fini_dmaq(efx);
1150 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
1152 netif_dbg(efx, drv, efx->net_dev,
1156 efx_for_each_channel(channel, efx) {
1221 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
1223 int step = efx->irq_mod_step_us;
1228 efx->type->push_irq_moderation(channel);
1232 efx->irq_rx_moderation_us) {
1234 efx->type->push_irq_moderation(channel);
1251 struct efx_nic *efx = channel->efx;
1257 netif_vdbg(efx, intr, efx->net_dev,
1267 efx->irq_rx_adaptive &&
1269 efx_update_irq_mod(efx, channel);
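
efx_update_irq_mod() (lines 1221-1234) implements adaptive interrupt moderation: when a channel looks underloaded, step the moderation delay down so it reacts faster; when it looks busy, step it up, capped at the configured irq_rx_moderation_us, so more events are batched per interrupt. A sketch of that stepping, with made-up score thresholds since the scoring itself is not visible in the fragments:

#include <stdio.h>

static unsigned int update_irq_mod(unsigned int current_us,
                                   unsigned int step_us,
                                   unsigned int max_us,
                                   int mod_score)
{
    if (mod_score < 10 && current_us > step_us)
        current_us -= step_us;          /* underloaded: react faster */
    else if (mod_score > 20 && current_us + step_us <= max_us)
        current_us += step_us;          /* busy: batch more events */
    return current_us;
}

int main(void)
{
    unsigned int mod = 30;
    mod = update_irq_mod(mod, 10, 60, 25);  /* busy -> 40 us */
    mod = update_irq_mod(mod, 10, 60, 5);   /* idle -> 30 us */
    printf("moderation = %u us\n", mod);
    return 0;
}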
1294 struct efx_nic *efx = channel->efx;
1296 channel->napi_dev = efx->net_dev;
1300 void efx_init_napi(struct efx_nic *efx)
1304 efx_for_each_channel(channel, efx)
1316 void efx_fini_napi(struct efx_nic *efx)
1320 efx_for_each_channel(channel, efx)