Lines Matching defs:efx

17 #include "efx.h"
84 static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
87 ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
98 int ef4_farch_test_registers(struct ef4_nic *efx,
111 ef4_reado(efx, &original, address);
122 ef4_writeo(efx, &reg, address);
123 ef4_reado(efx, &buf, address);
132 ef4_writeo(efx, &reg, address);
133 ef4_reado(efx, &buf, address);
139 ef4_writeo(efx, &original, address);
145 netif_err(efx, hw, efx->net_dev,
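The ef4_farch_test_registers() fragments (source lines 98-145) trace a non-destructive register self-test: save the original value, write a test pattern, read it back and compare, then restore the original and log any mismatch. A condensed sketch of one such pass, assuming the driver's ef4_reado()/ef4_writeo() accessors; the plain memcmp() stands in for the masked comparison the real test performs, and the message text is illustrative:

/* Sketch only: one save/pattern/verify/restore pass per register. */
static int test_one_register_sketch(struct ef4_nic *efx, unsigned int address,
				    const ef4_oword_t *pattern)
{
	ef4_oword_t original, buf;
	int rc = 0;

	ef4_reado(efx, &original, address);	/* save current contents */

	ef4_writeo(efx, pattern, address);	/* write the test pattern */
	ef4_reado(efx, &buf, address);		/* read it straight back */
	if (memcmp(pattern, &buf, sizeof(buf))) {
		netif_err(efx, hw, efx->net_dev,
			  "register %#x failed readback self-test\n", address);
		rc = -EIO;
	}

	ef4_writeo(efx, &original, address);	/* always restore */
	return rc;
}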
168 ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
181 netif_dbg(efx, probe, efx->net_dev,
188 ef4_write_buf_tbl(efx, &buf_desc, index);
194 ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
203 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
211 ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
223 static int ef4_alloc_special_buffer(struct ef4_nic *efx,
229 if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
235 buffer->index = efx->next_buffer_table;
236 efx->next_buffer_table += buffer->entries;
238 netif_dbg(efx, probe, efx->net_dev,
249 ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
254 netif_dbg(efx, hw, efx->net_dev,
261 ef4_nic_free_buffer(efx, &buffer->buf);
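Source lines 168-261 cover the life cycle of "special" (descriptor/event) buffers: allocation obtains DMA memory and claims a contiguous run of buffer-table slots by advancing efx->next_buffer_table, initialisation then programs one buffer-table entry per page with ef4_write_buf_tbl(), and teardown unmaps via FR_AZ_BUF_TBL_UPD before freeing. A minimal sketch of the slot bookkeeping on the allocation side; BUF_TBL_PAGE_SIZE is an assumed name for the per-entry granularity, used purely for illustration:

/* Sketch of the buffer-table slot hand-out implied by source lines 223-238. */
#define BUF_TBL_PAGE_SIZE	4096	/* illustrative granularity */

static int alloc_special_buffer_sketch(struct ef4_nic *efx,
				       struct ef4_special_buffer *buffer,
				       unsigned int len)
{
	len = ALIGN(len, BUF_TBL_PAGE_SIZE);

	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / BUF_TBL_PAGE_SIZE;

	/* Claim a contiguous run of buffer-table slots for this buffer. */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	return 0;
}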
279 ef4_writed_page(tx_queue->efx, &reg,
297 ef4_writeo_page(tx_queue->efx, &reg,
355 if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
365 struct ef4_nic *efx = tx_queue->efx;
369 return ef4_alloc_special_buffer(efx, &tx_queue->txd,
375 struct ef4_nic *efx = tx_queue->efx;
379 ef4_init_special_buffer(efx, &tx_queue->txd);
396 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
403 ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
406 if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
410 ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
415 ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
418 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
424 ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
431 struct ef4_nic *efx = tx_queue->efx;
440 ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
445 struct ef4_nic *efx = tx_queue->efx;
450 ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
454 ef4_fini_special_buffer(efx, &tx_queue->txd);
460 ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
481 rx_queue->efx->type->rx_buffer_padding,
491 struct ef4_nic *efx = rx_queue->efx;
505 ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
511 struct ef4_nic *efx = rx_queue->efx;
515 return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
522 struct ef4_nic *efx = rx_queue->efx;
523 bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
532 jumbo_en = !is_b0 || efx->rx_scatter;
534 netif_dbg(efx, hw, efx->net_dev,
542 ef4_init_special_buffer(efx, &rx_queue->rxd);
559 ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
565 struct ef4_nic *efx = rx_queue->efx;
572 ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
578 struct ef4_nic *efx = rx_queue->efx;
582 ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
586 ef4_fini_special_buffer(efx, &rx_queue->rxd);
592 ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
604 static bool ef4_farch_flush_wake(struct ef4_nic *efx)
609 return (atomic_read(&efx->active_queues) == 0 ||
610 (atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
611 && atomic_read(&efx->rxq_flush_pending) > 0));
614 static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
621 ef4_for_each_channel(channel, efx) {
623 ef4_reado_table(efx, &txd_ptr_tbl,
629 netif_dbg(efx, hw, efx->net_dev,
638 netif_dbg(efx, hw, efx->net_dev,
658 static int ef4_farch_do_flush(struct ef4_nic *efx)
666 ef4_for_each_channel(channel, efx) {
672 atomic_inc(&efx->rxq_flush_pending);
676 while (timeout && atomic_read(&efx->active_queues) > 0) {
681 ef4_for_each_channel(channel, efx) {
683 if (atomic_read(&efx->rxq_flush_outstanding) >=
689 atomic_dec(&efx->rxq_flush_pending);
690 atomic_inc(&efx->rxq_flush_outstanding);
696 timeout = wait_event_timeout(efx->flush_wq,
697 ef4_farch_flush_wake(efx),
701 if (atomic_read(&efx->active_queues) &&
702 !ef4_check_tx_flush_complete(efx)) {
703 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
704 "(rx %d+%d)\n", atomic_read(&efx->active_queues),
705 atomic_read(&efx->rxq_flush_outstanding),
706 atomic_read(&efx->rxq_flush_pending));
709 atomic_set(&efx->active_queues, 0);
710 atomic_set(&efx->rxq_flush_pending, 0);
711 atomic_set(&efx->rxq_flush_outstanding, 0);
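Source lines 604-711 sketch the queue-flush machinery: each initialised queue counts towards efx->active_queues, RX flushes are throttled so that at most EF4_RX_FLUSH_COUNT are outstanding at once, and ef4_farch_do_flush() sleeps on efx->flush_wq with ef4_farch_flush_wake() (source lines 604-611) as the wake condition, i.e. everything has drained or there is room to issue another RX batch. A condensed sketch of that wait loop; issue_pending_rx_flushes() is a hypothetical stand-in for the per-channel loop that moves queues from rxq_flush_pending to rxq_flush_outstanding, and the 5 s budget is illustrative:

/* Sketch of the flush batching/wait loop implied by source lines 658-711. */
static int do_flush_sketch(struct ef4_nic *efx)
{
	unsigned int timeout = msecs_to_jiffies(5000);	/* illustrative budget */

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		issue_pending_rx_flushes(efx);	/* hypothetical helper */

		/* Sleep until everything has drained, another batch of RX
		 * flushes can be issued, or the budget runs out.
		 */
		timeout = wait_event_timeout(efx->flush_wq,
					     ef4_farch_flush_wake(efx),
					     timeout);
	}

	return atomic_read(&efx->active_queues) ? -ETIMEDOUT : 0;
}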
717 int ef4_farch_fini_dmaq(struct ef4_nic *efx)
725 if (efx->state != STATE_RECOVERY) {
727 if (efx->pci_dev->is_busmaster) {
728 efx->type->prepare_flush(efx);
729 rc = ef4_farch_do_flush(efx);
730 efx->type->finish_flush(efx);
733 ef4_for_each_channel(channel, efx) {
748 * completion events. This means that efx->rxq_flush_outstanding remained at 4
749 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
752 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
753 * for batched flush requests; and the efx->active_queues gets messed up because
758 void ef4_farch_finish_flr(struct ef4_nic *efx)
760 atomic_set(&efx->rxq_flush_pending, 0);
761 atomic_set(&efx->rxq_flush_outstanding, 0);
762 atomic_set(&efx->active_queues, 0);
781 struct ef4_nic *efx = channel->efx;
789 ef4_writed(efx, &reg,
790 efx->type->evq_rptr_tbl_base +
795 void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
807 ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
817 ef4_farch_generate_event(channel->efx, channel->channel, &event);
831 struct ef4_nic *efx = channel->efx;
834 if (unlikely(READ_ONCE(efx->reset_pending)))
852 netif_tx_lock(efx->net_dev);
854 netif_tx_unlock(efx->net_dev);
856 ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
858 netif_err(efx, tx_err, efx->net_dev,
872 struct ef4_nic *efx = rx_queue->efx;
887 rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
898 else if (!efx->loopback_selftest) {
918 netif_dbg(efx, rx_err, efx->net_dev,
950 struct ef4_nic *efx = rx_queue->efx;
962 netif_info(efx, rx_err, efx->net_dev,
966 ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
987 struct ef4_nic *efx = channel->efx;
989 if (unlikely(READ_ONCE(efx->reset_pending)))
1090 ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1096 if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
1097 tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
1111 ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1120 if (qid >= efx->n_channels)
1122 channel = ef4_get_channel(efx, qid);
1128 netif_info(efx, hw, efx->net_dev,
1131 atomic_inc(&efx->rxq_flush_pending);
1136 atomic_dec(&efx->rxq_flush_outstanding);
1137 if (ef4_farch_flush_wake(efx))
1138 wake_up(&efx->flush_wq);
1144 struct ef4_nic *efx = channel->efx;
1146 WARN_ON(atomic_read(&efx->active_queues) == 0);
1147 atomic_dec(&efx->active_queues);
1148 if (ef4_farch_flush_wake(efx))
1149 wake_up(&efx->flush_wq);
1155 struct ef4_nic *efx = channel->efx;
1176 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1185 struct ef4_nic *efx = channel->efx;
1194 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1196 ef4_farch_handle_tx_flush_done(efx, event);
1199 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1201 ef4_farch_handle_rx_flush_done(efx, event);
1204 netif_dbg(efx, hw, efx->net_dev,
1209 netif_vdbg(efx, hw, efx->net_dev,
1213 netif_vdbg(efx, hw, efx->net_dev,
1218 netif_vdbg(efx, hw, efx->net_dev,
1223 netif_err(efx, rx_err, efx->net_dev,
1226 atomic_inc(&efx->rx_reset);
1227 ef4_schedule_reset(efx,
1228 EF4_WORKAROUND_6555(efx) ?
1233 netif_err(efx, rx_err, efx->net_dev,
1237 ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1240 netif_err(efx, tx_err, efx->net_dev,
1244 ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1247 netif_vdbg(efx, hw, efx->net_dev,
1257 struct ef4_nic *efx = channel->efx;
1277 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1297 if (tx_packets > efx->txq_entries) {
1309 if (efx->type->handle_global_event &&
1310 efx->type->handle_global_event(channel, &event))
1314 netif_err(channel->efx, hw, channel->efx->net_dev,
1329 struct ef4_nic *efx = channel->efx;
1333 return ef4_alloc_special_buffer(efx, &channel->eventq,
1340 struct ef4_nic *efx = channel->efx;
1342 netif_dbg(efx, hw, efx->net_dev,
1348 ef4_init_special_buffer(efx, &channel->eventq);
1358 ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1367 struct ef4_nic *efx = channel->efx;
1371 ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1375 ef4_fini_special_buffer(efx, &channel->eventq);
1381 ef4_free_special_buffer(channel->efx, &channel->eventq);
1405 static inline void ef4_farch_interrupts(struct ef4_nic *efx,
1411 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1414 ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1417 void ef4_farch_irq_enable_master(struct ef4_nic *efx)
1419 EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
1422 ef4_farch_interrupts(efx, true, false);
1425 void ef4_farch_irq_disable_master(struct ef4_nic *efx)
1428 ef4_farch_interrupts(efx, false, false);
1435 int ef4_farch_irq_test_generate(struct ef4_nic *efx)
1437 ef4_farch_interrupts(efx, true, true);
1444 irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
1446 struct falcon_nic_data *nic_data = efx->nic_data;
1447 ef4_oword_t *int_ker = efx->irq_status.addr;
1451 ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1454 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
1464 ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
1465 netif_err(efx, hw, efx->net_dev,
1471 pci_clear_master(efx->pci_dev);
1472 if (ef4_nic_is_dual_func(efx))
1474 ef4_farch_irq_disable_master(efx);
1477 if (efx->int_error_count == 0 ||
1478 time_after(jiffies, efx->int_error_expire)) {
1479 efx->int_error_count = 0;
1480 efx->int_error_expire =
1483 if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
1484 netif_err(efx, hw, efx->net_dev,
1486 ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1488 netif_err(efx, hw, efx->net_dev,
1491 ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
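Within ef4_farch_fatal_interrupt() (source lines 1444-1491), the tail fragments show a simple error-rate limiter: the counter restarts once the expiry time has passed, a count still below EF4_MAX_INT_ERRORS only schedules the recoverable RESET_TYPE_INT_ERROR, and anything beyond that disables the device with RESET_TYPE_DISABLE. A sketch of that decision; EF4_INT_ERROR_EXPIRE is an assumed name for the window length in seconds:

/* Sketch of the fatal-interrupt rate limiting implied by source lines 1477-1491. */
static void fatal_irq_rate_limit_sketch(struct ef4_nic *efx)
{
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		/* Window expired (or first error): start counting afresh. */
		efx->int_error_count = 0;
		efx->int_error_expire = jiffies + EF4_INT_ERROR_EXPIRE * HZ;
	}

	if (++efx->int_error_count < EF4_MAX_INT_ERRORS)
		ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);	/* try to recover */
	else
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);	/* give up on the NIC */
}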
1502 struct ef4_nic *efx = dev_id;
1503 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
1504 ef4_oword_t *int_ker = efx->irq_status.addr;
1512 ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
1519 if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
1520 !efx->eeh_disabled_legacy_irq) {
1521 disable_irq_nosync(efx->legacy_irq);
1522 efx->eeh_disabled_legacy_irq = true;
1526 if (queues & (1U << efx->irq_level) && soft_enabled) {
1529 return ef4_farch_fatal_interrupt(efx);
1530 efx->last_irq_cpu = raw_smp_processor_id();
1534 efx->irq_zero_count = 0;
1538 ef4_for_each_channel(channel, efx) {
1553 if (efx->irq_zero_count++ == 0)
1558 ef4_for_each_channel(channel, efx) {
1570 netif_vdbg(efx, intr, efx->net_dev,
1587 struct ef4_nic *efx = context->efx;
1588 ef4_oword_t *int_ker = efx->irq_status.addr;
1591 netif_vdbg(efx, intr, efx->net_dev,
1595 if (!likely(READ_ONCE(efx->irq_soft_enabled)))
1599 if (context->index == efx->irq_level) {
1602 return ef4_farch_fatal_interrupt(efx);
1603 efx->last_irq_cpu = raw_smp_processor_id();
1607 ef4_schedule_channel_irq(efx->channel[context->index]);
1615 void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
1620 BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);
1622 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1627 efx->rx_indir_table[i]);
1628 ef4_writed(efx, &dword,
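ef4_farch_rx_push_indir_table() (source lines 1615-1628) simply copies the software RSS indirection table into hardware, one dword per entry. A sketch of that loop; only efx->rx_indir_table and ef4_writed() come from the listing, while the FR_BZ_/FRF_BZ_ register and field names are assumptions modelled on the sfc register headers:

/* Sketch of the indirection-table push implied by source lines 1620-1628. */
static void rx_push_indir_table_sketch(struct ef4_nic *efx)
{
	ef4_dword_t dword;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) {
		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE_LABEL,
				     efx->rx_indir_table[i]);
		ef4_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}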
1639 * efx->vf_buftbl_base buftbl entries for SR-IOV
1640 * efx->rx_dc_base RX descriptor caches
1641 * efx->tx_dc_base TX descriptor caches
1643 void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
1650 vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
1652 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1653 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
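The dimensioning fragments (source lines 1639-1653) lay SRAM out from the top down: vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES), the TX descriptor caches end at sram_lim_qw, the RX caches sit immediately below them, and everything underneath is left for buffer-table entries. As a purely illustrative example (the cache sizes are assumed, not taken from the listing): with vi_count = 64, TX_DC_ENTRIES = 16 and RX_DC_ENTRIES = 64, tx_dc_base = sram_lim_qw - 64 * 16 = sram_lim_qw - 1024 and rx_dc_base = tx_dc_base - 64 * 64 = sram_lim_qw - 5120, all in qword units.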
1656 u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
1659 ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1663 void ef4_farch_init_common(struct ef4_nic *efx)
1668 EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1669 ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1670 EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1671 ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1676 ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1683 ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1685 ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1690 EF4_INT_MODE_USE_MSI(efx),
1691 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1692 ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1695 efx->irq_level = 0;
1708 ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1713 ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
1726 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
1728 ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1730 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1740 ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
1818 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
1865 static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
1867 struct ef4_farch_filter_state *state = efx->filter_state;
1871 ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1925 } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1933 efx->rx_scatter);
1936 ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1939 static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
1941 struct ef4_farch_filter_state *state = efx->filter_state;
1945 ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1959 ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2150 ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
2158 (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
2159 (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
2302 u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
2304 struct ef4_farch_filter_state *state = efx->filter_state;
2318 s32 ef4_farch_filter_insert(struct ef4_nic *efx,
2322 struct ef4_farch_filter_state *state = efx->filter_state;
2338 netif_vdbg(efx, hw, efx->net_dev,
2350 spin_lock_bh(&efx->filter_lock);
2380 spin_lock_bh(&efx->filter_lock);
2439 ef4_farch_filter_push_rx_config(efx);
2444 ef4_farch_filter_push_tx_limits(efx);
2446 ef4_farch_filter_push_rx_config(efx);
2449 ef4_writeo(efx, &filter,
2456 ef4_farch_filter_table_clear_entry(efx, table,
2460 netif_vdbg(efx, hw, efx->net_dev,
2466 spin_unlock_bh(&efx->filter_lock);
2471 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
2484 ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
2495 ef4_farch_filter_push_tx_limits(efx);
2497 ef4_farch_filter_push_rx_config(efx);
2501 static int ef4_farch_filter_remove(struct ef4_nic *efx,
2513 ef4_farch_filter_init_rx_auto(efx, spec);
2514 ef4_farch_filter_push_rx_config(efx);
2516 ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
2522 int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
2526 struct ef4_farch_filter_state *state = efx->filter_state;
2541 spin_lock_bh(&efx->filter_lock);
2542 rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
2543 spin_unlock_bh(&efx->filter_lock);
2548 int ef4_farch_filter_get_safe(struct ef4_nic *efx,
2552 struct ef4_farch_filter_state *state = efx->filter_state;
2569 spin_lock_bh(&efx->filter_lock);
2579 spin_unlock_bh(&efx->filter_lock);
2585 ef4_farch_filter_table_clear(struct ef4_nic *efx,
2589 struct ef4_farch_filter_state *state = efx->filter_state;
2593 spin_lock_bh(&efx->filter_lock);
2596 ef4_farch_filter_remove(efx, table,
2599 spin_unlock_bh(&efx->filter_lock);
2602 int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
2605 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
2607 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
2609 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
2614 u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
2617 struct ef4_farch_filter_state *state = efx->filter_state;
2623 spin_lock_bh(&efx->filter_lock);
2636 spin_unlock_bh(&efx->filter_lock);
2641 s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
2645 struct ef4_farch_filter_state *state = efx->filter_state;
2651 spin_lock_bh(&efx->filter_lock);
2670 spin_unlock_bh(&efx->filter_lock);
2676 void ef4_farch_filter_table_restore(struct ef4_nic *efx)
2678 struct ef4_farch_filter_state *state = efx->filter_state;
2684 spin_lock_bh(&efx->filter_lock);
2697 ef4_writeo(efx, &filter,
2702 ef4_farch_filter_push_rx_config(efx);
2703 ef4_farch_filter_push_tx_limits(efx);
2705 spin_unlock_bh(&efx->filter_lock);
2708 void ef4_farch_filter_table_remove(struct ef4_nic *efx)
2710 struct ef4_farch_filter_state *state = efx->filter_state;
2720 int ef4_farch_filter_table_probe(struct ef4_nic *efx)
2729 efx->filter_state = state;
2731 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
2763 ef4_farch_filter_init_rx_auto(efx, spec);
2768 ef4_farch_filter_push_rx_config(efx);
2773 ef4_farch_filter_table_remove(efx);
2778 void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
2780 struct ef4_farch_filter_state *state = efx->filter_state;
2786 spin_lock_bh(&efx->filter_lock);
2796 efx->n_rx_channels)
2799 if (efx->rx_scatter)
2811 ef4_writeo(efx, &filter,
2816 ef4_farch_filter_push_rx_config(efx);
2818 spin_unlock_bh(&efx->filter_lock);
2823 s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
2826 return ef4_farch_filter_insert(efx, gen_spec, true);
2829 bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
2832 struct ef4_farch_filter_state *state = efx->filter_state;
2838 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2840 ef4_farch_filter_table_clear_entry(efx, table, index);
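The ARFS expiry fragments (source lines 2829-2840) show the core decision: a hardware filter that was installed automatically for a flow is only torn down once rps_may_expire_flow() confirms the stack no longer tracks that flow. A stripped-down sketch, omitting the filter-table locking and the check that the slot really holds an automatic (hint-priority) filter:

/* Sketch of the expiry check implied by source lines 2829-2840. */
static bool rfs_expire_one_sketch(struct ef4_nic *efx,
				  struct ef4_farch_filter_table *table,
				  u32 flow_id, unsigned int index)
{
	if (rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
				flow_id, index)) {
		/* The flow went idle: drop the hardware filter entry. */
		ef4_farch_filter_table_clear_entry(efx, table, index);
		return true;
	}
	return false;
}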
2849 void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
2851 struct net_device *net_dev = efx->net_dev;
2853 union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
2857 if (!ef4_dev_registered(efx))
2862 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
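ef4_farch_filter_sync_rx_mode() (source lines 2849-2862) translates the net_device flags into the Falcon unicast filter flag and the multicast hash. A sketch of the usual scheme; the accept-all handling and the CRC-based bucket selection are assumptions here rather than lines present in the listing, with EF4_MCAST_HASH_ENTRIES taken as the hash-table width:

/* Sketch of the RX-mode sync implied by source lines 2849-2862. */
static void sync_rx_mode_sketch(struct ef4_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
	struct netdev_hw_addr *ha;
	u32 crc;

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));	/* accept all multicast */
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			__set_bit_le(crc & (EF4_MCAST_HASH_ENTRIES - 1),
				     mc_hash);
		}
	}
}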