Lines Matching refs:efx
14 #include "efx.h"
79 static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
89 netif_warn(efx, probe, efx->net_dev,
107 netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
117 if (efx->type->sriov_wanted) {
118 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
119 count > efx_vf_size(efx)) {
120 netif_warn(efx, probe, efx->net_dev,
124 count, efx_vf_size(efx));
125 count = efx_vf_size(efx);
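The efx_wanted_parallelism() references above show the RSS channel count being derived from the CPU count (or the rss_cpus override) and clamped to efx_vf_size() when SR-IOV is wanted. Below is a minimal, hedged userspace model of that decision; the function and parameter names are invented for illustration and are not the driver's API.

/* Hedged sketch: how an RSS channel count might be chosen. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int wanted_parallelism(unsigned int online_cpus,
				       unsigned int rss_cpus_param,
				       bool sriov_wanted,
				       unsigned int vf_size)
{
	/* A non-zero rss_cpus module parameter overrides the CPU-derived
	 * default. */
	unsigned int count = rss_cpus_param ? rss_cpus_param : online_cpus;

	/* With SR-IOV wanted, keep the spread no larger than the VF size so
	 * the RSS spread stays consistent with the VFs. */
	if (sriov_wanted && vf_size > 1 && count > vf_size)
		count = vf_size;

	return count;
}

int main(void)
{
	printf("8 CPUs, no override, no SR-IOV -> %u\n",
	       wanted_parallelism(8, 0, false, 0));
	printf("8 CPUs, no override, VF size 4 -> %u\n",
	       wanted_parallelism(8, 0, true, 4));
	return 0;
}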
133 static int efx_allocate_msix_channels(struct efx_nic *efx,
156 vec_count = pci_msix_vec_count(efx->pci_dev);
167 netif_err(efx, drv, efx->net_dev,
170 efx->n_xdp_channels = 0;
171 efx->xdp_tx_per_channel = 0;
172 efx->xdp_tx_queue_count = 0;
173 } else if (n_channels + n_xdp_tx > efx->max_vis) {
174 netif_err(efx, drv, efx->net_dev,
176 n_xdp_tx, n_channels, efx->max_vis);
177 efx->n_xdp_channels = 0;
178 efx->xdp_tx_per_channel = 0;
179 efx->xdp_tx_queue_count = 0;
181 efx->n_xdp_channels = n_xdp_ev;
182 efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
183 efx->xdp_tx_queue_count = n_xdp_tx;
185 netif_dbg(efx, drv, efx->net_dev,
191 netif_err(efx, drv, efx->net_dev,
194 netif_err(efx, drv, efx->net_dev,
201 efx->n_channels = n_channels;
204 n_channels -= efx->n_xdp_channels;
207 efx->n_tx_channels =
209 efx->max_tx_channels);
210 efx->tx_channel_offset =
211 n_channels - efx->n_tx_channels;
212 efx->n_rx_channels =
214 efx->n_tx_channels, 1U);
216 efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
217 efx->tx_channel_offset = 0;
218 efx->n_rx_channels = n_channels;
221 efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
222 efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
224 efx->xdp_channel_offset = n_channels;
226 netif_dbg(efx, drv, efx->net_dev,
228 efx->n_rx_channels);
230 return efx->n_channels;
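The efx_allocate_msix_channels() lines show an MSI-X vector budget being split between RX, TX and XDP channels, with the XDP channels placed after the others (xdp_channel_offset follows n_channels). The sketch below is one simplified way such a split can work; it ignores extra channels, hardware VI limits and the parallelism clamp, and all names are illustrative.

#include <stdio.h>

struct channel_plan {
	unsigned int n_channels;	/* total event channels */
	unsigned int n_rx_channels;
	unsigned int n_tx_channels;
	unsigned int tx_channel_offset;
	unsigned int n_xdp_channels;
	unsigned int xdp_channel_offset;
};

static void plan_channels(struct channel_plan *p, unsigned int vec_count,
			  int separate_tx, unsigned int n_xdp)
{
	/* Reserve vectors for XDP TX channels first; the remainder is shared
	 * between the normal RX/TX channels. */
	unsigned int n = vec_count > n_xdp ? vec_count - n_xdp : 1;

	if (separate_tx) {
		/* Dedicated TX channels take roughly half the budget and sit
		 * after the RX channels. */
		p->n_tx_channels = n / 2 ? n / 2 : 1;
		p->n_rx_channels = n - p->n_tx_channels;
		p->tx_channel_offset = p->n_rx_channels;
	} else {
		/* RX and TX share the same channels. */
		p->n_tx_channels = n;
		p->n_rx_channels = n;
		p->tx_channel_offset = 0;
	}

	p->n_xdp_channels = n_xdp;
	p->xdp_channel_offset = n;	/* XDP channels follow the others */
	p->n_channels = n + n_xdp;
}

int main(void)
{
	struct channel_plan p;

	plan_channels(&p, 16, 1, 4);
	printf("total %u rx %u tx %u (offset %u) xdp %u (offset %u)\n",
	       p.n_channels, p.n_rx_channels, p.n_tx_channels,
	       p.tx_channel_offset, p.n_xdp_channels, p.xdp_channel_offset);
	return 0;
}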
236 int efx_probe_interrupts(struct efx_nic *efx)
244 if (efx->extra_channel_type[i])
247 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
248 unsigned int parallelism = efx_wanted_parallelism(efx);
252 rc = efx_allocate_msix_channels(efx, efx->max_channels,
258 rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
263 netif_err(efx, drv, efx->net_dev,
265 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
266 efx->interrupt_mode = EFX_INT_MODE_MSI;
270 netif_err(efx, drv, efx->net_dev,
273 netif_err(efx, drv, efx->net_dev,
279 for (i = 0; i < efx->n_channels; i++)
280 efx_get_channel(efx, i)->irq =
286 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
287 efx->n_channels = 1;
288 efx->n_rx_channels = 1;
289 efx->n_tx_channels = 1;
290 efx->tx_channel_offset = 0;
291 efx->n_xdp_channels = 0;
292 efx->xdp_channel_offset = efx->n_channels;
293 rc = pci_enable_msi(efx->pci_dev);
295 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
297 netif_err(efx, drv, efx->net_dev,
299 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
300 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
307 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
308 efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
309 efx->n_rx_channels = 1;
310 efx->n_tx_channels = 1;
311 efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
312 efx->n_xdp_channels = 0;
313 efx->xdp_channel_offset = efx->n_channels;
314 efx->legacy_irq = efx->pci_dev->irq;
318 efx->n_extra_tx_channels = 0;
319 j = efx->xdp_channel_offset;
321 if (!efx->extra_channel_type[i])
323 if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
324 efx->extra_channel_type[i]->handle_no_channel(efx);
327 efx_get_channel(efx, j)->type =
328 efx->extra_channel_type[i];
329 if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
330 efx->n_extra_tx_channels++;
334 rss_spread = efx->n_rx_channels;
337 if (efx->type->sriov_wanted) {
338 efx->rss_spread = ((rss_spread > 1 ||
339 !efx->type->sriov_wanted(efx)) ?
340 rss_spread : efx_vf_size(efx));
344 efx->rss_spread = rss_spread;
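efx_probe_interrupts() falls back from MSI-X to MSI to legacy interrupts, but only as far as the NIC type's minimum supported mode allows. A hedged, self-contained model of that fallback loop follows; try_enable() is a stand-in for pci_enable_msix_range()/pci_enable_msi(), and the enum values are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum int_mode { INT_MODE_MSIX, INT_MODE_MSI, INT_MODE_LEGACY };

static bool try_enable(enum int_mode mode)
{
	/* Pretend MSI-X is unavailable so the fallback path is exercised. */
	return mode != INT_MODE_MSIX;
}

static enum int_mode probe_interrupts(enum int_mode wanted,
				      enum int_mode least_capable)
{
	int mode = wanted;

	/* Fall back one step at a time (MSI-X -> MSI -> legacy), but never
	 * below the least capable mode the hardware supports. */
	while (!try_enable((enum int_mode)mode) && mode < (int)least_capable)
		mode++;

	return (enum int_mode)mode;
}

int main(void)
{
	enum int_mode got = probe_interrupts(INT_MODE_MSIX, INT_MODE_LEGACY);

	printf("selected mode %d (0=MSI-X 1=MSI 2=legacy)\n", (int)got);
	return 0;
}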
350 void efx_set_interrupt_affinity(struct efx_nic *efx)
355 efx_for_each_channel(channel, efx) {
357 pcibus_to_node(efx->pci_dev->bus));
362 void efx_clear_interrupt_affinity(struct efx_nic *efx)
366 efx_for_each_channel(channel, efx)
371 efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
376 efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
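efx_set_interrupt_affinity() hints each channel's IRQ towards CPUs on the NUMA node of the NIC's PCI bus (pcibus_to_node() in the listing), with no-op stubs when CONFIG_SMP is off. A toy round-robin version of that spreading, with the node's CPU list as a plain array purely for illustration:

#include <stdio.h>

static void spread_affinity(const int *node_cpus, int n_node_cpus,
			    int n_channels)
{
	/* Round-robin the channels over the CPUs local to the device so
	 * interrupt handling stays NUMA-local. */
	for (int ch = 0; ch < n_channels; ch++)
		printf("channel %d -> CPU %d\n",
		       ch, node_cpus[ch % n_node_cpus]);
}

int main(void)
{
	int node_cpus[] = { 0, 2, 4, 6 };	/* CPUs on the NIC's NUMA node */

	spread_affinity(node_cpus, 4, 6);
	return 0;
}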
381 void efx_remove_interrupts(struct efx_nic *efx)
386 efx_for_each_channel(channel, efx)
388 pci_disable_msi(efx->pci_dev);
389 pci_disable_msix(efx->pci_dev);
392 efx->legacy_irq = 0;
406 struct efx_nic *efx = channel->efx;
409 netif_dbg(efx, probe, efx->net_dev,
415 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
425 struct efx_nic *efx = channel->efx;
430 netif_dbg(efx, drv, efx->net_dev,
435 efx->type->push_irq_moderation(channel);
445 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
471 netif_dbg(channel->efx, drv, channel->efx->net_dev,
480 netif_dbg(channel->efx, drv, channel->efx->net_dev,
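The event queue probe sizes the queue as roundup_pow_of_two(rxq_entries + txq_entries + 128): one slot per possible RX or TX completion plus some headroom for non-DMA events, rounded up to a power of two for cheap ring-index arithmetic. A tiny worked example; the helper below is only a stand-in for the kernel's roundup_pow_of_two().

#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow_of_two_u(unsigned int x)
{
	unsigned int n = 1;

	while (n < x)
		n <<= 1;
	return n;
}

int main(void)
{
	unsigned int rxq_entries = 1024, txq_entries = 1024;
	unsigned int evq_entries =
		roundup_pow_of_two_u(rxq_entries + txq_entries + 128);

	printf("event queue entries: %u\n", evq_entries);	/* 4096 */
	return 0;
}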
510 static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
521 channel->efx = efx;
527 tx_queue->efx = efx;
538 rx_queue->efx = efx;
544 int efx_init_channels(struct efx_nic *efx)
549 efx->channel[i] = efx_alloc_channel(efx, i);
550 if (!efx->channel[i])
552 efx->msi_context[i].efx = efx;
553 efx->msi_context[i].index = i;
557 efx->interrupt_mode = min(efx->type->min_interrupt_mode,
560 efx->max_channels = EFX_MAX_CHANNELS;
561 efx->max_tx_channels = EFX_MAX_CHANNELS;
566 void efx_fini_channels(struct efx_nic *efx)
571 if (efx->channel[i]) {
572 kfree(efx->channel[i]);
573 efx->channel[i] = NULL;
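efx_alloc_channel()/efx_init_channels() allocate one structure per channel and give each channel (and its TX/RX queues) a back-pointer to the owning efx_nic, while efx_fini_channels() frees them again. A stripped-down sketch of that ownership pattern, with invented structure names and a fixed-size channel array for brevity:

#include <stdlib.h>

struct nic;

struct channel {
	struct nic *nic;	/* back-pointer to the owning device */
	int index;
};

struct nic {
	struct channel *channel[32];	/* fixed size for brevity */
	int n_channels;
};

static int init_channels(struct nic *nic, int n)
{
	for (int i = 0; i < n; i++) {
		struct channel *ch = calloc(1, sizeof(*ch));

		if (!ch) {
			nic->n_channels = i;	/* let fini free the rest */
			return -1;
		}
		ch->nic = nic;
		ch->index = i;
		nic->channel[i] = ch;
	}
	nic->n_channels = n;
	return 0;
}

static void fini_channels(struct nic *nic)
{
	for (int i = 0; i < nic->n_channels; i++) {
		free(nic->channel[i]);
		nic->channel[i] = NULL;
	}
}

int main(void)
{
	struct nic nic = { .n_channels = 0 };

	init_channels(&nic, 4);
	fini_channels(&nic);
	return 0;
}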
625 netif_dbg(channel->efx, probe, channel->efx->net_dev,
659 struct efx_nic *efx = channel->efx;
665 if (number >= efx->xdp_channel_offset &&
666 !WARN_ON_ONCE(!efx->n_xdp_channels)) {
668 number -= efx->xdp_channel_offset;
669 } else if (efx->tx_channel_offset == 0) {
671 } else if (number < efx->tx_channel_offset) {
675 number -= efx->tx_channel_offset;
677 snprintf(buf, len, "%s%s-%d", efx->name, type, number);
680 void efx_set_channel_names(struct efx_nic *efx)
684 efx_for_each_channel(channel, efx)
686 efx->msi_context[channel->channel].name,
687 sizeof(efx->msi_context[0].name));
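efx_get_channel_name() builds IRQ names as "%s%s-%d" from the interface name, a role suffix and a per-role index, renumbering TX channels relative to tx_channel_offset. The approximation below is hedged: the "-rx"/"-tx" suffixes mirror the usual convention, and the real code also special-cases XDP and extra channels.

#include <stdio.h>

static void channel_name(char *buf, size_t len, const char *dev_name,
			 int channel, int tx_offset)
{
	const char *type = "";
	int number = channel;

	if (tx_offset != 0) {		/* separate RX and TX channels */
		if (channel < tx_offset) {
			type = "-rx";
		} else {
			type = "-tx";
			number -= tx_offset;	/* TX channels count from 0 */
		}
	}
	snprintf(buf, len, "%s%s-%d", dev_name, type, number);
}

int main(void)
{
	char buf[32];

	channel_name(buf, sizeof(buf), "eth0", 5, 4);
	printf("%s\n", buf);	/* eth0-tx-1 */
	return 0;
}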
690 int efx_probe_channels(struct efx_nic *efx)
696 efx->next_buffer_table = 0;
703 efx_for_each_channel_rev(channel, efx) {
706 netif_err(efx, probe, efx->net_dev,
712 efx_set_channel_names(efx);
717 efx_remove_channels(efx);
726 netif_dbg(channel->efx, drv, channel->efx->net_dev,
737 void efx_remove_channels(struct efx_nic *efx)
741 efx_for_each_channel(channel, efx)
744 kfree(efx->xdp_tx_queues);
747 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
750 *ptp_channel = efx_ptp_channel(efx);
751 struct efx_ptp_data *ptp_data = efx->ptp_data;
756 rc = efx_check_disabled(efx);
763 efx_for_each_channel(channel, efx) {
782 efx_device_detach_sync(efx);
783 efx_stop_all(efx);
784 efx_soft_disable_interrupts(efx);
788 for (i = 0; i < efx->n_channels; i++) {
789 channel = efx->channel[i];
800 old_rxq_entries = efx->rxq_entries;
801 old_txq_entries = efx->txq_entries;
802 efx->rxq_entries = rxq_entries;
803 efx->txq_entries = txq_entries;
804 for (i = 0; i < efx->n_channels; i++)
805 swap(efx->channel[i], other_channel[i]);
808 efx->next_buffer_table = next_buffer_table;
810 for (i = 0; i < efx->n_channels; i++) {
811 channel = efx->channel[i];
817 efx_init_napi_channel(efx->channel[i]);
821 efx->ptp_data = NULL;
823 for (i = 0; i < efx->n_channels; i++) {
832 efx->ptp_data = ptp_data;
833 rc2 = efx_soft_enable_interrupts(efx);
836 netif_err(efx, drv, efx->net_dev,
838 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
840 efx_start_all(efx);
841 efx_device_attach_if_not_resetting(efx);
847 efx->rxq_entries = old_rxq_entries;
848 efx->txq_entries = old_txq_entries;
849 for (i = 0; i < efx->n_channels; i++)
850 swap(efx->channel[i], other_channel[i]);
851 efx_ptp_update_channel(efx, ptp_channel);
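efx_realloc_channels() applies new ring sizes by preparing replacement channel state, swapping it in (swap(efx->channel[i], other_channel[i])) and swapping the old state back if re-initialisation fails. The rollback shape, reduced to a toy configuration object with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct ring_cfg {
	unsigned int rxq_entries;
	unsigned int txq_entries;
};

static bool apply_ring_cfg(const struct ring_cfg *cfg)
{
	/* Stand-in for re-initialising the hardware queues; pretend any
	 * non-zero configuration succeeds. */
	return cfg->rxq_entries && cfg->txq_entries;
}

static int realloc_rings(struct ring_cfg *live, struct ring_cfg next)
{
	struct ring_cfg old = *live;

	*live = next;			/* swap the new configuration in */
	if (!apply_ring_cfg(live)) {
		*live = old;		/* roll back on failure */
		(void)apply_ring_cfg(live);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ring_cfg live = { 512, 512 };

	realloc_rings(&live, (struct ring_cfg){ 1024, 1024 });
	printf("rxq %u txq %u\n", live.rxq_entries, live.txq_entries);
	return 0;
}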
855 int efx_set_channels(struct efx_nic *efx)
863 if (efx->xdp_tx_queue_count) {
864 EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
867 efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
868 sizeof(*efx->xdp_tx_queues),
870 if (!efx->xdp_tx_queues)
879 efx_for_each_channel(channel, efx) {
880 if (channel->channel < efx->n_rx_channels)
885 if (channel->channel >= efx->tx_channel_offset) {
896 if (xdp_queue_number < efx->xdp_tx_queue_count) {
897 netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
900 efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
907 netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
914 WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
916 rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
919 return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
924 return channel->channel - channel->efx->tx_channel_offset <
925 channel->efx->n_tx_channels;
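The efx_set_channels() loop walks every channel, numbers the core TX queues, records XDP TX queues in the efx->xdp_tx_queues lookup table, and finally reports the real queue counts to the stack via netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues(). A simplified numbering pass, assuming one TX queue per channel and XDP channels at the end; the counts and layout are invented for illustration.

#include <stdio.h>

#define N_CHANNELS	6
#define TX_OFFSET	2	/* channels 0-1 are RX-only */
#define N_XDP		2	/* the last two channels carry XDP TX queues */

int main(void)
{
	int xdp_queue[N_XDP];
	int next_core_txq = 0, next_xdp = 0;

	for (int ch = 0; ch < N_CHANNELS; ch++) {
		if (ch < TX_OFFSET)
			continue;	/* RX-only channel, no TX queue */

		if (ch >= N_CHANNELS - N_XDP) {
			/* Record XDP queues in a lookup table so XDP transmit
			 * can find its queue without scanning channels. */
			xdp_queue[next_xdp] = ch;
			printf("channel %d: XDP TX queue %d\n", ch, next_xdp++);
		} else {
			printf("channel %d: core TX queue %d\n",
			       ch, next_core_txq++);
		}
	}
	printf("XDP queue 0 lives on channel %d\n", xdp_queue[0]);
	return 0;
}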
932 int efx_soft_enable_interrupts(struct efx_nic *efx)
937 BUG_ON(efx->state == STATE_DISABLED);
939 efx->irq_soft_enabled = true;
942 efx_for_each_channel(channel, efx) {
951 efx_mcdi_mode_event(efx);
956 efx_for_each_channel(channel, efx) {
967 void efx_soft_disable_interrupts(struct efx_nic *efx)
971 if (efx->state == STATE_DISABLED)
974 efx_mcdi_mode_poll(efx);
976 efx->irq_soft_enabled = false;
979 if (efx->legacy_irq)
980 synchronize_irq(efx->legacy_irq);
982 efx_for_each_channel(channel, efx) {
992 efx_mcdi_flush_async(efx);
995 int efx_enable_interrupts(struct efx_nic *efx)
1001 BUG_ON(efx->state == STATE_DISABLED);
1003 if (efx->eeh_disabled_legacy_irq) {
1004 enable_irq(efx->legacy_irq);
1005 efx->eeh_disabled_legacy_irq = false;
1008 efx->type->irq_enable_master(efx);
1010 efx_for_each_channel(channel, efx) {
1018 rc = efx_soft_enable_interrupts(efx);
1026 efx_for_each_channel(channel, efx) {
1033 efx->type->irq_disable_non_ev(efx);
1038 void efx_disable_interrupts(struct efx_nic *efx)
1042 efx_soft_disable_interrupts(efx);
1044 efx_for_each_channel(channel, efx) {
1049 efx->type->irq_disable_non_ev(efx);
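efx_enable_interrupts() sets up per-channel resources and, if one of them fails, unwinds only the channels already set up before disabling the non-event interrupts and returning the error. The classic goto-unwind shape, with illustrative stubs (request_channel_irq()/free_channel_irq() are invented names):

#include <stdio.h>

static int request_channel_irq(int ch)
{
	return ch < 3 ? 0 : -1;		/* pretend the fourth request fails */
}

static void free_channel_irq(int ch)
{
	printf("freeing IRQ for channel %d\n", ch);
}

static int enable_interrupts(int n_channels)
{
	int ch;

	for (ch = 0; ch < n_channels; ch++) {
		if (request_channel_irq(ch) < 0)
			goto fail;
	}
	return 0;

fail:
	while (--ch >= 0)		/* unwind only what was set up */
		free_channel_irq(ch);
	return -1;
}

int main(void)
{
	printf("enable_interrupts: %d\n", enable_interrupts(6));
	return 0;
}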
1052 void efx_start_channels(struct efx_nic *efx)
1058 efx_for_each_channel(channel, efx) {
1061 atomic_inc(&efx->active_queues);
1066 atomic_inc(&efx->active_queues);
1076 void efx_stop_channels(struct efx_nic *efx)
1084 efx_for_each_channel(channel, efx) {
1089 efx_for_each_channel(channel, efx) {
1102 if (efx->type->fini_dmaq)
1103 rc = efx->type->fini_dmaq(efx);
1106 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
1108 netif_dbg(efx, drv, efx->net_dev,
1112 efx_for_each_channel(channel, efx) {
1177 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
1179 int step = efx->irq_mod_step_us;
1184 efx->type->push_irq_moderation(channel);
1188 efx->irq_rx_moderation_us) {
1190 efx->type->push_irq_moderation(channel);
1207 struct efx_nic *efx = channel->efx;
1213 netif_vdbg(efx, intr, efx->net_dev,
1223 efx->irq_rx_adaptive &&
1225 efx_update_irq_mod(efx, channel);
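efx_update_irq_mod() implements adaptive interrupt moderation: after a sampling window the per-channel delay is stepped down when the channel was lightly loaded and stepped up, capped at irq_rx_moderation_us, when it was busy. A hedged model with invented thresholds and a busy-events metric that only stands in for the driver's real accounting:

#include <stdio.h>

static unsigned int update_irq_mod(unsigned int current_us, unsigned int step_us,
				   unsigned int max_us, unsigned int busy_events,
				   unsigned int poll_count)
{
	/* Average events handled per poll over the sampling window. */
	unsigned int avg = poll_count ? busy_events / poll_count : 0;

	if (avg < 1 && current_us > step_us)
		current_us -= step_us;	/* lightly loaded: react faster */
	else if (avg > 4 && current_us + step_us <= max_us)
		current_us += step_us;	/* busy: batch more work per interrupt */

	return current_us;
}

int main(void)
{
	unsigned int mod = 30;

	mod = update_irq_mod(mod, 10, 60, 200, 20);	/* busy -> 40 */
	mod = update_irq_mod(mod, 10, 60, 5, 20);	/* idle -> 30 */
	printf("moderation now %u us\n", mod);
	return 0;
}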
1250 struct efx_nic *efx = channel->efx;
1252 channel->napi_dev = efx->net_dev;
1257 void efx_init_napi(struct efx_nic *efx)
1261 efx_for_each_channel(channel, efx)
1273 void efx_fini_napi(struct efx_nic *efx)
1277 efx_for_each_channel(channel, efx)