Lines matching refs: efx

22 #include "efx.h"
188 static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
189 static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
191 static void ef4_remove_channels(struct ef4_nic *efx);
193 static void ef4_remove_port(struct ef4_nic *efx);
195 static void ef4_fini_napi(struct ef4_nic *efx);
197 static void ef4_fini_struct(struct ef4_nic *efx);
198 static void ef4_start_all(struct ef4_nic *efx);
199 static void ef4_stop_all(struct ef4_nic *efx);
201 #define EF4_ASSERT_RESET_SERIALISED(efx) \
203 if ((efx->state == STATE_READY) || \
204 (efx->state == STATE_RECOVERY) || \
205 (efx->state == STATE_DISABLED)) \
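
The grep shows only the matching lines of this macro; in the upstream sfc driver the checks are wrapped in do { ... } while (0) and gate an ASSERT_RTNL() call, so any reset-sensitive state requires the RTNL lock. A minimal standalone model of that pattern, with assert_rtnl_held() standing in for the kernel's ASSERT_RTNL():

#include <assert.h>
#include <stdio.h>

enum nic_state { STATE_UNINIT, STATE_READY, STATE_RECOVERY, STATE_DISABLED };

struct fake_nic { enum nic_state state; };

static int rtnl_held = 1;	/* pretend the caller took the RTNL lock */
static void assert_rtnl_held(void) { assert(rtnl_held); }

/* Shape of EF4_ASSERT_RESET_SERIALISED as reconstructed from the lines
 * above: states that need serialisation trigger the lock assertion. */
#define ASSERT_RESET_SERIALISED(nic)				\
	do {							\
		if (((nic)->state == STATE_READY) ||		\
		    ((nic)->state == STATE_RECOVERY) ||		\
		    ((nic)->state == STATE_DISABLED))		\
			assert_rtnl_held();			\
	} while (0)

int main(void)
{
	struct fake_nic nic = { .state = STATE_READY };

	ASSERT_RESET_SERIALISED(&nic);
	puts("serialisation check passed");
	return 0;
}
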
209 static int ef4_check_disabled(struct ef4_nic *efx)
211 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
212 netif_err(efx, drv, efx->net_dev,
270 static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
272 int step = efx->irq_mod_step_us;
277 efx->type->push_irq_moderation(channel);
281 efx->irq_rx_moderation_us) {
283 efx->type->push_irq_moderation(channel);
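
A standalone sketch of the adaptive moderation step these lines implement, assuming (as the surrounding code suggests) that a per-channel interrupt score moves the moderation value up or down by irq_mod_step_us, clamped at irq_rx_moderation_us, with the result pushed to hardware when it changes. The score and thresholds here are illustrative, not the driver's:

#include <stdio.h>

int main(void)
{
	unsigned int step_us = 7;	/* efx->irq_mod_step_us, e.g. one
					 * timer quantum rounded to us */
	unsigned int ceiling_us = 60;	/* efx->irq_rx_moderation_us */
	unsigned int mod_us = 30;	/* channel's current moderation */
	int score = 9, low = 3, high = 8;	/* made-up adaptive score */

	if (score < low && mod_us > step_us)
		mod_us -= step_us;	/* quiet channel: react faster */
	else if (score > high && mod_us < ceiling_us)
		mod_us += step_us;	/* busy channel: coalesce harder */

	printf("new moderation: %u us\n", mod_us);	/* 37 */
	return 0;
}
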
295 struct ef4_nic *efx = channel->efx;
298 netif_vdbg(efx, intr, efx->net_dev,
306 efx->irq_rx_adaptive &&
308 ef4_update_irq_mod(efx, channel);
332 struct ef4_nic *efx = channel->efx;
335 netif_dbg(efx, probe, efx->net_dev,
340 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
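
The event queue sizing on line 340 is simple arithmetic: one event per RX and TX descriptor, plus 128 slots of slack for non-descriptor events, rounded up to a power of two as the hardware requires. A userspace model (the kernel's roundup_pow_of_two() is re-implemented here; the queue sizes are examples):

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long rxq_entries = 1024, txq_entries = 1024;
	unsigned long entries =
		roundup_pow_of_two(rxq_entries + txq_entries + 128);

	printf("event queue entries: %lu\n", entries);	/* 4096 */
	return 0;
}
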
350 struct ef4_nic *efx = channel->efx;
355 netif_dbg(efx, drv, efx->net_dev,
360 efx->type->push_irq_moderation(channel);
370 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
396 netif_dbg(channel->efx, drv, channel->efx->net_dev,
405 netif_dbg(channel->efx, drv, channel->efx->net_dev,
419 ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
430 channel->efx = efx;
436 tx_queue->efx = efx;
442 rx_queue->efx = efx;
493 netif_dbg(channel->efx, probe, channel->efx->net_dev,
526 struct ef4_nic *efx = channel->efx;
531 if (efx->tx_channel_offset == 0) {
533 } else if (channel->channel < efx->tx_channel_offset) {
537 number -= efx->tx_channel_offset;
539 snprintf(buf, len, "%s%s-%d", efx->name, type, number);
542 static void ef4_set_channel_names(struct ef4_nic *efx)
546 ef4_for_each_channel(channel, efx)
548 efx->msi_context[channel->channel].name,
549 sizeof(efx->msi_context[0].name));
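
Lines 526-549 build per-channel names and copy them into the MSI contexts. A sketch of the naming rule, assuming the usual sfc convention that the "-rx"/"-tx" type strings (not visible in the matching lines) label channels below and above tx_channel_offset, with TX numbering rebased at the offset as line 537 shows:

#include <stdio.h>

int main(void)
{
	const char *name = "eth0";		/* efx->name, illustrative */
	unsigned int tx_channel_offset = 4;	/* first TX channel index */
	char buf[32];

	for (unsigned int ch = 3; ch <= 5; ch++) {
		const char *type = ch < tx_channel_offset ? "-rx" : "-tx";
		unsigned int number = ch < tx_channel_offset
			? ch : ch - tx_channel_offset;

		snprintf(buf, sizeof(buf), "%s%s-%u", name, type, number);
		printf("channel %u -> %s\n", ch, buf);
	}
	return 0;	/* eth0-rx-3, eth0-tx-0, eth0-tx-1 */
}
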
552 static int ef4_probe_channels(struct ef4_nic *efx)
558 efx->next_buffer_table = 0;
565 ef4_for_each_channel_rev(channel, efx) {
568 netif_err(efx, probe, efx->net_dev,
574 ef4_set_channel_names(efx);
579 ef4_remove_channels(efx);
587 static void ef4_start_datapath(struct ef4_nic *efx)
589 netdev_features_t old_features = efx->net_dev->features;
590 bool old_rx_scatter = efx->rx_scatter;
600 efx->rx_dma_len = (efx->rx_prefix_size +
601 EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
602 efx->type->rx_buffer_padding);
604 efx->rx_ip_align + efx->rx_dma_len);
606 efx->rx_scatter = efx->type->always_rx_scatter;
607 efx->rx_buffer_order = 0;
608 } else if (efx->type->can_rx_scatter) {
614 efx->rx_scatter = true;
615 efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
616 efx->rx_buffer_order = 0;
618 efx->rx_scatter = false;
619 efx->rx_buffer_order = get_order(rx_buf_len);
622 ef4_rx_config_page_split(efx);
623 if (efx->rx_buffer_order)
624 netif_dbg(efx, drv, efx->net_dev,
626 efx->rx_dma_len, efx->rx_buffer_order,
627 efx->rx_pages_per_batch);
629 netif_dbg(efx, drv, efx->net_dev,
631 efx->rx_dma_len, efx->rx_page_buf_step,
632 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
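
Lines 600-632 size the RX buffers: the DMA length is hardware prefix plus worst-case frame for the MTU plus padding, and when that no longer fits the page-split scheme the driver either enables scatter (buffer order stays 0) or falls back to higher-order page allocations. A standalone model of the order fallback, with get_order() re-implemented and all sizes illustrative:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int rx_prefix_size = 16, padding = 0, rx_ip_align = 2;
	unsigned int max_frame_len = 9022;	/* jumbo MTU, illustrative */
	unsigned int rx_dma_len = rx_prefix_size + max_frame_len + padding;
	unsigned int rx_buf_len = rx_ip_align + rx_dma_len;

	/* Without scatter, a ~9KB frame needs an order-2 (16KB) page
	 * allocation; with scatter the order would stay 0. */
	printf("rx_buf_len=%u -> order %u\n", rx_buf_len,
	       get_order(rx_buf_len));
	return 0;
}
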
637 efx->net_dev->hw_features |= efx->net_dev->features;
638 efx->net_dev->hw_features &= ~efx->fixed_features;
639 efx->net_dev->features |= efx->fixed_features;
640 if (efx->net_dev->features != old_features)
641 netdev_features_change(efx->net_dev);
644 if (efx->rx_scatter != old_rx_scatter)
645 efx->type->filter_update_rx_scatter(efx);
654 efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
655 efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
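
Lines 654-655 set the TX queue flow-control points: stop the queue once fewer descriptors remain than a worst-case skb can consume, and wake it again at half that fill level so the queue does not oscillate. The arithmetic, with a hypothetical per-skb worst case (the real value comes from ef4_tx_max_skb_descs() and is NIC-dependent):

#include <stdio.h>

int main(void)
{
	unsigned int txq_entries = 1024;
	unsigned int max_skb_descs = 34;	/* hypothetical worst case */

	unsigned int stop_thresh = txq_entries - max_skb_descs;	/* 990 */
	unsigned int wake_thresh = stop_thresh / 2;		/* 495 */

	printf("stop at %u, wake at %u\n", stop_thresh, wake_thresh);
	return 0;
}
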
658 ef4_for_each_channel(channel, efx) {
661 atomic_inc(&efx->active_queues);
666 atomic_inc(&efx->active_queues);
675 if (netif_device_present(efx->net_dev))
676 netif_tx_wake_all_queues(efx->net_dev);
679 static void ef4_stop_datapath(struct ef4_nic *efx)
686 EF4_ASSERT_RESET_SERIALISED(efx);
687 BUG_ON(efx->port_enabled);
690 ef4_for_each_channel(channel, efx) {
695 ef4_for_each_channel(channel, efx) {
708 rc = efx->type->fini_dmaq(efx);
709 if (rc && EF4_WORKAROUND_7803(efx)) {
715 netif_err(efx, drv, efx->net_dev,
717 ef4_schedule_reset(efx, RESET_TYPE_ALL);
719 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
721 netif_dbg(efx, drv, efx->net_dev,
725 ef4_for_each_channel(channel, efx) {
738 netif_dbg(channel->efx, drv, channel->efx->net_dev,
749 static void ef4_remove_channels(struct ef4_nic *efx)
753 ef4_for_each_channel(channel, efx)
758 ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
765 rc = ef4_check_disabled(efx);
772 ef4_for_each_channel(channel, efx) {
791 ef4_device_detach_sync(efx);
792 ef4_stop_all(efx);
793 ef4_soft_disable_interrupts(efx);
797 for (i = 0; i < efx->n_channels; i++) {
798 channel = efx->channel[i];
809 old_rxq_entries = efx->rxq_entries;
810 old_txq_entries = efx->txq_entries;
811 efx->rxq_entries = rxq_entries;
812 efx->txq_entries = txq_entries;
813 for (i = 0; i < efx->n_channels; i++) {
814 swap(efx->channel[i], other_channel[i]);
818 efx->next_buffer_table = next_buffer_table;
820 for (i = 0; i < efx->n_channels; i++) {
821 channel = efx->channel[i];
827 ef4_init_napi_channel(efx->channel[i]);
832 for (i = 0; i < efx->n_channels; i++) {
841 rc2 = ef4_soft_enable_interrupts(efx);
844 netif_err(efx, drv, efx->net_dev,
846 ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
848 ef4_start_all(efx);
849 netif_device_attach(efx->net_dev);
855 efx->rxq_entries = old_rxq_entries;
856 efx->txq_entries = old_txq_entries;
857 for (i = 0; i < efx->n_channels; i++) {
858 swap(efx->channel[i], other_channel[i]);
895 void ef4_link_status_changed(struct ef4_nic *efx)
897 struct ef4_link_state *link_state = &efx->link_state;
903 if (!netif_running(efx->net_dev))
906 if (link_state->up != netif_carrier_ok(efx->net_dev)) {
907 efx->n_link_state_changes++;
910 netif_carrier_on(efx->net_dev);
912 netif_carrier_off(efx->net_dev);
917 netif_info(efx, link, efx->net_dev,
920 efx->net_dev->mtu);
922 netif_info(efx, link, efx->net_dev, "link down\n");
925 void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
927 efx->link_advertising = advertising;
930 efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
932 efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
934 efx->wanted_fc ^= EF4_FC_TX;
938 void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
940 efx->wanted_fc = wanted_fc;
941 if (efx->link_advertising) {
943 efx->link_advertising |= (ADVERTISED_Pause |
946 efx->link_advertising &= ~(ADVERTISED_Pause |
949 efx->link_advertising ^= ADVERTISED_Asym_Pause;
953 static void ef4_fini_port(struct ef4_nic *efx);
955 /* We assume that efx->type->reconfigure_mac will always try to sync RX
958 void ef4_mac_reconfigure(struct ef4_nic *efx)
960 down_read(&efx->filter_sem);
961 efx->type->reconfigure_mac(efx);
962 up_read(&efx->filter_sem);
972 int __ef4_reconfigure_port(struct ef4_nic *efx)
977 WARN_ON(!mutex_is_locked(&efx->mac_lock));
980 phy_mode = efx->phy_mode;
981 if (LOOPBACK_INTERNAL(efx))
982 efx->phy_mode |= PHY_MODE_TX_DISABLED;
984 efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
986 rc = efx->type->reconfigure_port(efx);
989 efx->phy_mode = phy_mode;
996 int ef4_reconfigure_port(struct ef4_nic *efx)
1000 EF4_ASSERT_RESET_SERIALISED(efx);
1002 mutex_lock(&efx->mac_lock);
1003 rc = __ef4_reconfigure_port(efx);
1004 mutex_unlock(&efx->mac_lock);
1014 struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);
1016 mutex_lock(&efx->mac_lock);
1017 if (efx->port_enabled)
1018 ef4_mac_reconfigure(efx);
1019 mutex_unlock(&efx->mac_lock);
1022 static int ef4_probe_port(struct ef4_nic *efx)
1026 netif_dbg(efx, probe, efx->net_dev, "create port\n");
1029 efx->phy_mode = PHY_MODE_SPECIAL;
1032 rc = efx->type->probe_port(efx);
1037 eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
1042 static int ef4_init_port(struct ef4_nic *efx)
1046 netif_dbg(efx, drv, efx->net_dev, "init port\n");
1048 mutex_lock(&efx->mac_lock);
1050 rc = efx->phy_op->init(efx);
1054 efx->port_initialized = true;
1058 ef4_mac_reconfigure(efx);
1061 rc = efx->phy_op->reconfigure(efx);
1065 mutex_unlock(&efx->mac_lock);
1069 efx->phy_op->fini(efx);
1071 mutex_unlock(&efx->mac_lock);
1075 static void ef4_start_port(struct ef4_nic *efx)
1077 netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1078 BUG_ON(efx->port_enabled);
1080 mutex_lock(&efx->mac_lock);
1081 efx->port_enabled = true;
1084 ef4_mac_reconfigure(efx);
1086 mutex_unlock(&efx->mac_lock);
1094 static void ef4_stop_port(struct ef4_nic *efx)
1096 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1098 EF4_ASSERT_RESET_SERIALISED(efx);
1100 mutex_lock(&efx->mac_lock);
1101 efx->port_enabled = false;
1102 mutex_unlock(&efx->mac_lock);
1105 netif_addr_lock_bh(efx->net_dev);
1106 netif_addr_unlock_bh(efx->net_dev);
1108 cancel_delayed_work_sync(&efx->monitor_work);
1109 ef4_selftest_async_cancel(efx);
1110 cancel_work_sync(&efx->mac_work);
1113 static void ef4_fini_port(struct ef4_nic *efx)
1115 netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
1117 if (!efx->port_initialized)
1120 efx->phy_op->fini(efx);
1121 efx->port_initialized = false;
1123 efx->link_state.up = false;
1124 ef4_link_status_changed(efx);
1127 static void ef4_remove_port(struct ef4_nic *efx)
1129 netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
1131 efx->type->remove_port(efx);
1150 static void ef4_associate(struct ef4_nic *efx)
1154 if (efx->primary == efx) {
1157 netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
1158 list_add_tail(&efx->node, &ef4_primary_list);
1162 if (ef4_same_controller(efx, other)) {
1166 pci_name(efx->pci_dev),
1167 efx->net_dev->name);
1169 &efx->secondary_list);
1170 other->primary = efx;
1177 if (ef4_same_controller(efx, other)) {
1178 netif_dbg(efx, probe, efx->net_dev,
1182 list_add_tail(&efx->node,
1184 efx->primary = other;
1189 netif_dbg(efx, probe, efx->net_dev,
1191 list_add_tail(&efx->node, &ef4_unassociated_list);
1195 static void ef4_dissociate(struct ef4_nic *efx)
1199 list_del(&efx->node);
1200 efx->primary = NULL;
1202 list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
1212 static int ef4_init_io(struct ef4_nic *efx)
1214 struct pci_dev *pci_dev = efx->pci_dev;
1215 dma_addr_t dma_mask = efx->type->max_dma_mask;
1216 unsigned int mem_map_size = efx->type->mem_map_size(efx);
1219 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1221 bar = efx->type->mem_bar;
1225 netif_err(efx, probe, efx->net_dev,
1243 netif_err(efx, probe, efx->net_dev,
1247 netif_dbg(efx, probe, efx->net_dev,
1250 efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1253 netif_err(efx, probe, efx->net_dev,
1258 efx->membase = ioremap(efx->membase_phys, mem_map_size);
1259 if (!efx->membase) {
1260 netif_err(efx, probe, efx->net_dev,
1262 (unsigned long long)efx->membase_phys, mem_map_size);
1266 netif_dbg(efx, probe, efx->net_dev,
1268 (unsigned long long)efx->membase_phys, mem_map_size,
1269 efx->membase);
1274 pci_release_region(efx->pci_dev, bar);
1276 efx->membase_phys = 0;
1278 pci_disable_device(efx->pci_dev);
1283 static void ef4_fini_io(struct ef4_nic *efx)
1287 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1289 if (efx->membase) {
1290 iounmap(efx->membase);
1291 efx->membase = NULL;
1294 if (efx->membase_phys) {
1295 bar = efx->type->mem_bar;
1296 pci_release_region(efx->pci_dev, bar);
1297 efx->membase_phys = 0;
1301 if (!pci_vfs_assigned(efx->pci_dev))
1302 pci_disable_device(efx->pci_dev);
1305 void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
1309 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1310 efx->rx_indir_table[i] =
1311 ethtool_rxfh_indir_default(i, efx->rss_spread);
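
The kernel's ethtool_rxfh_indir_default() simply spreads indirection-table slots round-robin over the RX rings (index % n_rx_rings), so the default table built on lines 1309-1311 cycles 0, 1, ..., rss_spread-1. A quick demonstration (the real table is larger than the 12 slots printed here):

#include <stdio.h>

static unsigned int rxfh_indir_default(unsigned int index,
				       unsigned int n_rx_rings)
{
	return index % n_rx_rings;	/* matches the kernel helper */
}

int main(void)
{
	unsigned int rss_spread = 4;	/* e.g. four RX channels */

	for (unsigned int i = 0; i < 12; i++)
		printf("%u ", rxfh_indir_default(i, rss_spread));
	printf("\n");	/* 0 1 2 3 0 1 2 3 0 1 2 3 */
	return 0;
}
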
1314 static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
1324 netif_warn(efx, probe, efx->net_dev,
1342 netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1354 static int ef4_probe_interrupts(struct ef4_nic *efx)
1361 if (efx->extra_channel_type[i])
1364 if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
1368 n_channels = ef4_wanted_parallelism(efx);
1372 n_channels = min(n_channels, efx->max_channels);
1376 rc = pci_enable_msix_range(efx->pci_dev,
1380 efx->interrupt_mode = EF4_INT_MODE_MSI;
1381 netif_err(efx, drv, efx->net_dev,
1384 netif_err(efx, drv, efx->net_dev,
1387 netif_err(efx, drv, efx->net_dev,
1393 efx->n_channels = n_channels;
1397 efx->n_tx_channels = min(max(n_channels / 2,
1399 efx->max_tx_channels);
1400 efx->n_rx_channels = max(n_channels -
1401 efx->n_tx_channels,
1404 efx->n_tx_channels = min(n_channels,
1405 efx->max_tx_channels);
1406 efx->n_rx_channels = n_channels;
1408 for (i = 0; i < efx->n_channels; i++)
1409 ef4_get_channel(efx, i)->irq =
1415 if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
1416 efx->n_channels = 1;
1417 efx->n_rx_channels = 1;
1418 efx->n_tx_channels = 1;
1419 rc = pci_enable_msi(efx->pci_dev);
1421 ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1423 netif_err(efx, drv, efx->net_dev,
1425 efx->interrupt_mode = EF4_INT_MODE_LEGACY;
1430 if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
1431 efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
1432 efx->n_rx_channels = 1;
1433 efx->n_tx_channels = 1;
1434 efx->legacy_irq = efx->pci_dev->irq;
1438 j = efx->n_channels;
1440 if (!efx->extra_channel_type[i])
1442 if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
1443 efx->n_channels <= extra_channels) {
1444 efx->extra_channel_type[i]->handle_no_channel(efx);
1447 ef4_get_channel(efx, j)->type =
1448 efx->extra_channel_type[i];
1452 efx->rss_spread = efx->n_rx_channels;
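
When TX channels are kept separate under MSI-X (lines 1397-1401), roughly half the vectors go to TX (at least one, capped by max_tx_channels) and the remainder, at least one, to RX; rss_spread then equals n_rx_channels (line 1452). The split in isolation, with illustrative counts:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int n_channels = 8, max_tx_channels = 16;
	unsigned int n_tx = min_u(max_u(n_channels / 2, 1), max_tx_channels);
	unsigned int n_rx = max_u(n_channels - n_tx, 1);

	printf("tx=%u rx=%u rss_spread=%u\n", n_tx, n_rx, n_rx);  /* 4 4 4 */
	return 0;
}
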
1457 static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
1462 BUG_ON(efx->state == STATE_DISABLED);
1464 efx->irq_soft_enabled = true;
1467 ef4_for_each_channel(channel, efx) {
1479 ef4_for_each_channel(channel, efx) {
1490 static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
1494 if (efx->state == STATE_DISABLED)
1497 efx->irq_soft_enabled = false;
1500 if (efx->legacy_irq)
1501 synchronize_irq(efx->legacy_irq);
1503 ef4_for_each_channel(channel, efx) {
1513 static int ef4_enable_interrupts(struct ef4_nic *efx)
1518 BUG_ON(efx->state == STATE_DISABLED);
1520 if (efx->eeh_disabled_legacy_irq) {
1521 enable_irq(efx->legacy_irq);
1522 efx->eeh_disabled_legacy_irq = false;
1525 efx->type->irq_enable_master(efx);
1527 ef4_for_each_channel(channel, efx) {
1535 rc = ef4_soft_enable_interrupts(efx);
1543 ef4_for_each_channel(channel, efx) {
1550 efx->type->irq_disable_non_ev(efx);
1555 static void ef4_disable_interrupts(struct ef4_nic *efx)
1559 ef4_soft_disable_interrupts(efx);
1561 ef4_for_each_channel(channel, efx) {
1566 efx->type->irq_disable_non_ev(efx);
1569 static void ef4_remove_interrupts(struct ef4_nic *efx)
1574 ef4_for_each_channel(channel, efx)
1576 pci_disable_msi(efx->pci_dev);
1577 pci_disable_msix(efx->pci_dev);
1580 efx->legacy_irq = 0;
1583 static void ef4_set_channels(struct ef4_nic *efx)
1588 efx->tx_channel_offset =
1590 efx->n_channels - efx->n_tx_channels : 0;
1596 ef4_for_each_channel(channel, efx) {
1597 if (channel->channel < efx->n_rx_channels)
1603 tx_queue->queue -= (efx->tx_channel_offset *
1608 static int ef4_probe_nic(struct ef4_nic *efx)
1612 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1615 rc = efx->type->probe(efx);
1620 if (!efx->max_channels || !efx->max_tx_channels) {
1621 netif_err(efx, drv, efx->net_dev,
1631 rc = ef4_probe_interrupts(efx);
1635 ef4_set_channels(efx);
1638 rc = efx->type->dimension_resources(efx);
1644 ef4_remove_interrupts(efx);
1648 if (efx->n_channels > 1)
1649 netdev_rss_key_fill(&efx->rx_hash_key,
1650 sizeof(efx->rx_hash_key));
1651 ef4_set_default_rx_indir_table(efx);
1653 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1654 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1657 efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1658 ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1664 ef4_remove_interrupts(efx);
1666 efx->type->remove(efx);
1670 static void ef4_remove_nic(struct ef4_nic *efx)
1672 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1674 ef4_remove_interrupts(efx);
1675 efx->type->remove(efx);
1678 static int ef4_probe_filters(struct ef4_nic *efx)
1682 spin_lock_init(&efx->filter_lock);
1683 init_rwsem(&efx->filter_sem);
1684 mutex_lock(&efx->mac_lock);
1685 down_write(&efx->filter_sem);
1686 rc = efx->type->filter_table_probe(efx);
1691 if (efx->type->offload_features & NETIF_F_NTUPLE) {
1695 ef4_for_each_channel(channel, efx) {
1697 kcalloc(efx->type->max_rx_ip_filters,
1704 i < efx->type->max_rx_ip_filters;
1711 ef4_for_each_channel(channel, efx)
1713 efx->type->filter_table_remove(efx);
1718 efx->rps_expire_index = efx->rps_expire_channel = 0;
1722 up_write(&efx->filter_sem);
1723 mutex_unlock(&efx->mac_lock);
1727 static void ef4_remove_filters(struct ef4_nic *efx)
1732 ef4_for_each_channel(channel, efx)
1735 down_write(&efx->filter_sem);
1736 efx->type->filter_table_remove(efx);
1737 up_write(&efx->filter_sem);
1740 static void ef4_restore_filters(struct ef4_nic *efx)
1742 down_read(&efx->filter_sem);
1743 efx->type->filter_table_restore(efx);
1744 up_read(&efx->filter_sem);
1753 static int ef4_probe_all(struct ef4_nic *efx)
1757 rc = ef4_probe_nic(efx);
1759 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1763 rc = ef4_probe_port(efx);
1765 netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1770 if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
1774 efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;
1776 rc = ef4_probe_filters(efx);
1778 netif_err(efx, probe, efx->net_dev,
1783 rc = ef4_probe_channels(efx);
1790 ef4_remove_filters(efx);
1793 ef4_remove_port(efx);
1795 ef4_remove_nic(efx);
1807 static void ef4_start_all(struct ef4_nic *efx)
1809 EF4_ASSERT_RESET_SERIALISED(efx);
1810 BUG_ON(efx->state == STATE_DISABLED);
1814 if (efx->port_enabled || !netif_running(efx->net_dev) ||
1815 efx->reset_pending)
1818 ef4_start_port(efx);
1819 ef4_start_datapath(efx);
1822 if (efx->type->monitor != NULL)
1823 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1826 efx->type->start_stats(efx);
1827 efx->type->pull_stats(efx);
1828 spin_lock_bh(&efx->stats_lock);
1829 efx->type->update_stats(efx, NULL, NULL);
1830 spin_unlock_bh(&efx->stats_lock);
1838 static void ef4_stop_all(struct ef4_nic *efx)
1840 EF4_ASSERT_RESET_SERIALISED(efx);
1843 if (!efx->port_enabled)
1849 efx->type->pull_stats(efx);
1850 spin_lock_bh(&efx->stats_lock);
1851 efx->type->update_stats(efx, NULL, NULL);
1852 spin_unlock_bh(&efx->stats_lock);
1853 efx->type->stop_stats(efx);
1854 ef4_stop_port(efx);
1860 WARN_ON(netif_running(efx->net_dev) &&
1861 netif_device_present(efx->net_dev));
1862 netif_tx_disable(efx->net_dev);
1864 ef4_stop_datapath(efx);
1867 static void ef4_remove_all(struct ef4_nic *efx)
1869 ef4_remove_channels(efx);
1870 ef4_remove_filters(efx);
1871 ef4_remove_port(efx);
1872 ef4_remove_nic(efx);
1880 unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
1884 if (usecs * 1000 < efx->timer_quantum_ns)
1886 return usecs * 1000 / efx->timer_quantum_ns;
1889 unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
1894 return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
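
Lines 1880-1894 convert between microseconds and the NIC's timer ticks: usecs-to-ticks truncates, ticks-to-usecs rounds up. A standalone model, assuming (per the usual sfc code) that the elided lines return 0 ticks for 0 us and never round a non-zero interval down to zero; the quantum value is illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int usecs_to_ticks(unsigned int quantum_ns, unsigned int usecs)
{
	if (usecs == 0)
		return 0;			/* assumed elided branch */
	if (usecs * 1000 < quantum_ns)
		return 1;			/* never round down to 0 */
	return usecs * 1000 / quantum_ns;	/* truncating */
}

static unsigned int ticks_to_usecs(unsigned int quantum_ns, unsigned int ticks)
{
	return DIV_ROUND_UP(ticks * quantum_ns, 1000);	/* rounds up */
}

int main(void)
{
	unsigned int q = 6144;			/* example quantum in ns */
	unsigned int t = usecs_to_ticks(q, 100);

	printf("100 us -> %u ticks -> %u us\n", t, ticks_to_usecs(q, t));
	return 0;	/* 100 us -> 16 ticks -> 99 us */
}
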
1898 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
1905 EF4_ASSERT_RESET_SERIALISED(efx);
1907 timer_max_us = efx->timer_max_ns / 1000;
1912 if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
1914 netif_err(efx, drv, efx->net_dev, "Channels are shared. "
1919 efx->irq_rx_adaptive = rx_adaptive;
1920 efx->irq_rx_moderation_us = rx_usecs;
1921 ef4_for_each_channel(channel, efx) {
1931 void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
1934 *rx_adaptive = efx->irq_rx_adaptive;
1935 *rx_usecs = efx->irq_rx_moderation_us;
1941 if (efx->tx_channel_offset == 0) {
1946 tx_channel = efx->channel[efx->tx_channel_offset];
1960 struct ef4_nic *efx = container_of(data, struct ef4_nic,
1963 netif_vdbg(efx, timer, efx->net_dev,
1966 BUG_ON(efx->type->monitor == NULL);
1971 if (mutex_trylock(&efx->mac_lock)) {
1972 if (efx->port_enabled)
1973 efx->type->monitor(efx);
1974 mutex_unlock(&efx->mac_lock);
1977 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1992 struct ef4_nic *efx = netdev_priv(net_dev);
2000 return mdio_mii_ioctl(&efx->mdio, data, cmd);
2011 struct ef4_nic *efx = channel->efx;
2013 channel->napi_dev = efx->net_dev;
2017 static void ef4_init_napi(struct ef4_nic *efx)
2021 ef4_for_each_channel(channel, efx)
2033 static void ef4_fini_napi(struct ef4_nic *efx)
2037 ef4_for_each_channel(channel, efx)
2050 struct ef4_nic *efx = netdev_priv(net_dev);
2053 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2056 rc = ef4_check_disabled(efx);
2059 if (efx->phy_mode & PHY_MODE_SPECIAL)
2064 ef4_link_status_changed(efx);
2066 ef4_start_all(efx);
2067 ef4_selftest_async_start(efx);
2077 struct ef4_nic *efx = netdev_priv(net_dev);
2079 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2083 ef4_stop_all(efx);
2092 struct ef4_nic *efx = netdev_priv(net_dev);
2094 spin_lock_bh(&efx->stats_lock);
2095 efx->type->update_stats(efx, NULL, stats);
2096 spin_unlock_bh(&efx->stats_lock);
2102 struct ef4_nic *efx = netdev_priv(net_dev);
2104 netif_err(efx, tx_err, efx->net_dev,
2106 efx->port_enabled);
2108 ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2115 struct ef4_nic *efx = netdev_priv(net_dev);
2118 rc = ef4_check_disabled(efx);
2122 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2124 ef4_device_detach_sync(efx);
2125 ef4_stop_all(efx);
2127 mutex_lock(&efx->mac_lock);
2129 ef4_mac_reconfigure(efx);
2130 mutex_unlock(&efx->mac_lock);
2132 ef4_start_all(efx);
2133 netif_device_attach(efx->net_dev);
2139 struct ef4_nic *efx = netdev_priv(net_dev);
2146 netif_err(efx, drv, efx->net_dev,
2155 if (efx->type->set_mac_address) {
2156 rc = efx->type->set_mac_address(efx);
2164 mutex_lock(&efx->mac_lock);
2165 ef4_mac_reconfigure(efx);
2166 mutex_unlock(&efx->mac_lock);
2174 struct ef4_nic *efx = netdev_priv(net_dev);
2176 if (efx->port_enabled)
2177 queue_work(efx->workqueue, &efx->mac_work);
2183 struct ef4_nic *efx = netdev_priv(net_dev);
2188 rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
2222 static void ef4_update_name(struct ef4_nic *efx)
2224 strcpy(efx->name, efx->net_dev->name);
2225 ef4_mtd_rename(efx);
2226 ef4_set_channel_names(efx);
2248 struct ef4_nic *efx = dev_get_drvdata(dev);
2249 return sprintf(buf, "%d\n", efx->phy_type);
2253 static int ef4_register_netdev(struct ef4_nic *efx)
2255 struct net_device *net_dev = efx->net_dev;
2260 net_dev->irq = efx->pci_dev->irq;
2273 efx->state = STATE_READY;
2275 if (efx->reset_pending) {
2276 netif_err(efx, probe, efx->net_dev,
2285 ef4_update_name(efx);
2294 ef4_for_each_channel(channel, efx) {
2300 ef4_associate(efx);
2304 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2306 netif_err(efx, drv, efx->net_dev,
2314 ef4_dissociate(efx);
2317 efx->state = STATE_UNINIT;
2319 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2323 static void ef4_unregister_netdev(struct ef4_nic *efx)
2325 if (!efx->net_dev)
2328 BUG_ON(netdev_priv(efx->net_dev) != efx);
2330 if (ef4_dev_registered(efx)) {
2331 strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2332 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2333 unregister_netdev(efx->net_dev);
2345 void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
2347 EF4_ASSERT_RESET_SERIALISED(efx);
2349 ef4_stop_all(efx);
2350 ef4_disable_interrupts(efx);
2352 mutex_lock(&efx->mac_lock);
2353 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2355 efx->phy_op->fini(efx);
2356 efx->type->fini(efx);
2364 int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
2368 EF4_ASSERT_RESET_SERIALISED(efx);
2371 rc = efx->type->init(efx);
2373 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2380 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2382 rc = efx->phy_op->init(efx);
2385 rc = efx->phy_op->reconfigure(efx);
2387 netif_err(efx, drv, efx->net_dev,
2391 rc = ef4_enable_interrupts(efx);
2395 down_read(&efx->filter_sem);
2396 ef4_restore_filters(efx);
2397 up_read(&efx->filter_sem);
2399 mutex_unlock(&efx->mac_lock);
2401 ef4_start_all(efx);
2406 efx->port_initialized = false;
2408 mutex_unlock(&efx->mac_lock);
2418 int ef4_reset(struct ef4_nic *efx, enum reset_type method)
2423 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2426 ef4_device_detach_sync(efx);
2427 ef4_reset_down(efx, method);
2429 rc = efx->type->reset(efx, method);
2431 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2439 efx->reset_pending &= -(1 << (method + 1));
2441 __clear_bit(method, &efx->reset_pending);
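
Line 2439 uses a compact mask: a completed reset of a given scope also covers every lower-numbered (smaller-scope) reset type, so bits 0..method are cleared in one operation; -(1 << (method + 1)) is exactly the mask with only the higher bits set. Line 2441 is the failure path, clearing just the one bit. Demonstrating the mask with an example bitmap:

#include <stdio.h>

int main(void)
{
	unsigned long pending = 0x2f;	/* example set of pending resets */
	int method = 3;			/* scope of the reset just done */

	/* Keep only bits above `method`: smaller-scope resets are
	 * subsumed by the one that just completed. */
	pending &= -(1UL << (method + 1));

	printf("pending after reset: %#lx\n", pending);	/* 0x20 */
	return 0;
}
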
2447 pci_set_master(efx->pci_dev);
2454 rc2 = ef4_reset_up(efx, method, !disabled);
2462 dev_close(efx->net_dev);
2463 netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2464 efx->state = STATE_DISABLED;
2466 netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2467 netif_device_attach(efx->net_dev);
2477 int ef4_try_recovery(struct ef4_nic *efx)
2485 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2501 struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
2505 pending = READ_ONCE(efx->reset_pending);
2510 ef4_try_recovery(efx))
2522 if (efx->state == STATE_READY)
2523 (void)ef4_reset(efx, method);
2528 void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
2532 if (efx->state == STATE_RECOVERY) {
2533 netif_dbg(efx, drv, efx->net_dev,
2548 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2552 method = efx->type->map_reset_reason(type);
2553 netif_dbg(efx, drv, efx->net_dev,
2559 set_bit(method, &efx->reset_pending);
2565 if (READ_ONCE(efx->state) != STATE_READY)
2568 queue_work(reset_workqueue, &efx->reset_work);
2597 int ef4_port_dummy_op_int(struct ef4_nic *efx)
2601 void ef4_port_dummy_op_void(struct ef4_nic *efx) {}
2603 static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
2624 static int ef4_init_struct(struct ef4_nic *efx,
2630 INIT_LIST_HEAD(&efx->node);
2631 INIT_LIST_HEAD(&efx->secondary_list);
2632 spin_lock_init(&efx->biu_lock);
2634 INIT_LIST_HEAD(&efx->mtd_list);
2636 INIT_WORK(&efx->reset_work, ef4_reset_work);
2637 INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
2638 INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
2639 efx->pci_dev = pci_dev;
2640 efx->msg_enable = debug;
2641 efx->state = STATE_UNINIT;
2642 strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2644 efx->net_dev = net_dev;
2645 efx->rx_prefix_size = efx->type->rx_prefix_size;
2646 efx->rx_ip_align =
2647 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
2648 efx->rx_packet_hash_offset =
2649 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2650 efx->rx_packet_ts_offset =
2651 efx->type->rx_ts_offset - efx->type->rx_prefix_size;
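
Lines 2645-2651 precompute RX layout constants: the IP-alignment offset folds the hardware prefix length into NET_IP_ALIGN so the IP header lands 4-byte aligned, and per-field offsets (hash, timestamp) are rebased from prefix-relative to packet-relative, which can legitimately go negative for fields inside the prefix. A model with hypothetical prefix geometry:

#include <stdio.h>

#define NET_IP_ALIGN 2	/* typical; 0 where unaligned loads are cheap */

int main(void)
{
	unsigned int rx_prefix_size = 16;	/* hypothetical */
	unsigned int rx_hash_offset = 12;	/* hypothetical, in prefix */

	unsigned int rx_ip_align =
		NET_IP_ALIGN ? (rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
	int rx_packet_hash_offset =
		(int)rx_hash_offset - (int)rx_prefix_size;

	printf("ip_align=%u hash_offset=%d\n",
	       rx_ip_align, rx_packet_hash_offset);	/* 2 and -4 */
	return 0;
}
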
2652 spin_lock_init(&efx->stats_lock);
2653 mutex_init(&efx->mac_lock);
2654 efx->phy_op = &ef4_dummy_phy_operations;
2655 efx->mdio.dev = net_dev;
2656 INIT_WORK(&efx->mac_work, ef4_mac_work);
2657 init_waitqueue_head(&efx->flush_wq);
2660 efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
2661 if (!efx->channel[i])
2663 efx->msi_context[i].efx = efx;
2664 efx->msi_context[i].index = i;
2668 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2672 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2674 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2675 if (!efx->workqueue)
2681 ef4_fini_struct(efx);
2685 static void ef4_fini_struct(struct ef4_nic *efx)
2690 kfree(efx->channel[i]);
2692 kfree(efx->vpd_sn);
2694 if (efx->workqueue) {
2695 destroy_workqueue(efx->workqueue);
2696 efx->workqueue = NULL;
2700 void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
2705 ef4_for_each_channel(channel, efx)
2708 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
2720 static void ef4_pci_remove_main(struct ef4_nic *efx)
2725 BUG_ON(efx->state == STATE_READY);
2726 cancel_work_sync(&efx->reset_work);
2728 ef4_disable_interrupts(efx);
2729 ef4_nic_fini_interrupt(efx);
2730 ef4_fini_port(efx);
2731 efx->type->fini(efx);
2732 ef4_fini_napi(efx);
2733 ef4_remove_all(efx);
2742 struct ef4_nic *efx;
2744 efx = pci_get_drvdata(pci_dev);
2745 if (!efx)
2750 ef4_dissociate(efx);
2751 dev_close(efx->net_dev);
2752 ef4_disable_interrupts(efx);
2753 efx->state = STATE_UNINIT;
2756 ef4_unregister_netdev(efx);
2758 ef4_mtd_remove(efx);
2760 ef4_pci_remove_main(efx);
2762 ef4_fini_io(efx);
2763 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2765 ef4_fini_struct(efx);
2766 free_netdev(efx->net_dev);
2772 static void ef4_probe_vpd_strings(struct ef4_nic *efx)
2774 struct pci_dev *dev = efx->pci_dev;
2797 efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
2806 static int ef4_pci_probe_main(struct ef4_nic *efx)
2811 rc = ef4_probe_all(efx);
2815 ef4_init_napi(efx);
2817 rc = efx->type->init(efx);
2819 netif_err(efx, probe, efx->net_dev,
2824 rc = ef4_init_port(efx);
2826 netif_err(efx, probe, efx->net_dev,
2831 rc = ef4_nic_init_interrupt(efx);
2834 rc = ef4_enable_interrupts(efx);
2841 ef4_nic_fini_interrupt(efx);
2843 ef4_fini_port(efx);
2845 efx->type->fini(efx);
2847 ef4_fini_napi(efx);
2848 ef4_remove_all(efx);
2866 struct ef4_nic *efx;
2870 net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
2874 efx = netdev_priv(net_dev);
2875 efx->type = (const struct ef4_nic_type *) entry->driver_data;
2876 efx->fixed_features |= NETIF_F_HIGHDMA;
2878 pci_set_drvdata(pci_dev, efx);
2880 rc = ef4_init_struct(efx, pci_dev, net_dev);
2884 netif_info(efx, probe, efx->net_dev,
2887 ef4_probe_vpd_strings(efx);
2890 rc = ef4_init_io(efx);
2894 rc = ef4_pci_probe_main(efx);
2898 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2904 net_dev->hw_features = net_dev->features & ~efx->fixed_features;
2911 net_dev->features |= efx->fixed_features;
2913 rc = ef4_register_netdev(efx);
2917 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2921 rc = ef4_mtd_probe(efx);
2924 netif_warn(efx, probe, efx->net_dev,
2930 ef4_pci_remove_main(efx);
2932 ef4_fini_io(efx);
2934 ef4_fini_struct(efx);
2937 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
2944 struct ef4_nic *efx = dev_get_drvdata(dev);
2948 if (efx->state != STATE_DISABLED) {
2949 efx->state = STATE_UNINIT;
2951 ef4_device_detach_sync(efx);
2953 ef4_stop_all(efx);
2954 ef4_disable_interrupts(efx);
2965 struct ef4_nic *efx = dev_get_drvdata(dev);
2969 if (efx->state != STATE_DISABLED) {
2970 rc = ef4_enable_interrupts(efx);
2974 mutex_lock(&efx->mac_lock);
2975 efx->phy_op->reconfigure(efx);
2976 mutex_unlock(&efx->mac_lock);
2978 ef4_start_all(efx);
2980 netif_device_attach(efx->net_dev);
2982 efx->state = STATE_READY;
2984 efx->type->resume_wol(efx);
2990 queue_work(reset_workqueue, &efx->reset_work);
3003 struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3005 efx->type->fini(efx);
3007 efx->reset_pending = 0;
3017 struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3027 pci_set_master(efx->pci_dev);
3028 rc = efx->type->reset(efx, RESET_TYPE_ALL);
3031 rc = efx->type->init(efx);
3066 struct ef4_nic *efx = pci_get_drvdata(pdev);
3073 if (efx->state != STATE_DISABLED) {
3074 efx->state = STATE_RECOVERY;
3075 efx->reset_pending = 0;
3077 ef4_device_detach_sync(efx);
3079 ef4_stop_all(efx);
3080 ef4_disable_interrupts(efx);
3100 struct ef4_nic *efx = pci_get_drvdata(pdev);
3104 netif_err(efx, hw, efx->net_dev,
3115 struct ef4_nic *efx = pci_get_drvdata(pdev);
3120 if (efx->state == STATE_DISABLED)
3123 rc = ef4_reset(efx, RESET_TYPE_ALL);
3125 netif_err(efx, hw, efx->net_dev,
3128 efx->state = STATE_READY;
3129 netif_dbg(efx, hw, efx->net_dev,