Lines Matching defs:efx
17 #include "efx.h"
88 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
91 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
102 int efx_farch_test_registers(struct efx_nic *efx,
115 efx_reado(efx, &original, address);
126 efx_writeo(efx, &reg, address);
127 efx_reado(efx, &buf, address);
136 efx_writeo(efx, &reg, address);
137 efx_reado(efx, &buf, address);
143 efx_writeo(efx, &original, address);
149 netif_err(efx, hw, efx->net_dev,
172 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
185 netif_dbg(efx, probe, efx->net_dev,
192 efx_write_buf_tbl(efx, &buf_desc, index);
198 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
207 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
215 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
227 static int efx_alloc_special_buffer(struct efx_nic *efx,
232 struct siena_nic_data *nic_data = efx->nic_data;
236 if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
242 buffer->index = efx->next_buffer_table;
243 efx->next_buffer_table += buffer->entries;
245 BUG_ON(efx_siena_sriov_enabled(efx) &&
246 nic_data->vf_buftbl_base < efx->next_buffer_table);
249 netif_dbg(efx, probe, efx->net_dev,
260 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
265 netif_dbg(efx, hw, efx->net_dev,
272 efx_siena_free_buffer(efx, &buffer->buf);
290 efx_writed_page(tx_queue->efx, &reg,
308 efx_writeo_page(tx_queue->efx, &reg,
373 struct efx_nic *efx = tx_queue->efx;
379 return efx_alloc_special_buffer(efx, &tx_queue->txd,
386 struct efx_nic *efx = tx_queue->efx;
390 efx_init_special_buffer(efx, &tx_queue->txd);
410 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
418 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
425 struct efx_nic *efx = tx_queue->efx;
434 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
439 struct efx_nic *efx = tx_queue->efx;
444 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
448 efx_fini_special_buffer(efx, &tx_queue->txd);
454 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
475 rx_queue->efx->type->rx_buffer_padding,
485 struct efx_nic *efx = rx_queue->efx;
499 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
505 struct efx_nic *efx = rx_queue->efx;
509 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
516 struct efx_nic *efx = rx_queue->efx;
520 jumbo_en = efx->rx_scatter;
522 netif_dbg(efx, hw, efx->net_dev,
530 efx_init_special_buffer(efx, &rx_queue->rxd);
547 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
553 struct efx_nic *efx = rx_queue->efx;
560 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
566 struct efx_nic *efx = rx_queue->efx;
570 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
574 efx_fini_special_buffer(efx, &rx_queue->rxd);
580 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
592 static bool efx_farch_flush_wake(struct efx_nic *efx)
597 return (atomic_read(&efx->active_queues) == 0 ||
598 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
599 && atomic_read(&efx->rxq_flush_pending) > 0));
602 static bool efx_check_tx_flush_complete(struct efx_nic *efx)
609 efx_for_each_channel(channel, efx) {
611 efx_reado_table(efx, &txd_ptr_tbl,
617 netif_dbg(efx, hw, efx->net_dev,
626 netif_dbg(efx, hw, efx->net_dev,
646 static int efx_farch_do_flush(struct efx_nic *efx)
654 efx_for_each_channel(channel, efx) {
660 atomic_inc(&efx->rxq_flush_pending);
664 while (timeout && atomic_read(&efx->active_queues) > 0) {
669 if (efx_siena_sriov_enabled(efx)) {
670 rc = efx_siena_mcdi_flush_rxqs(efx);
679 efx_for_each_channel(channel, efx) {
681 if (atomic_read(&efx->rxq_flush_outstanding) >=
687 atomic_dec(&efx->rxq_flush_pending);
688 atomic_inc(&efx->rxq_flush_outstanding);
695 timeout = wait_event_timeout(efx->flush_wq,
696 efx_farch_flush_wake(efx),
700 if (atomic_read(&efx->active_queues) &&
701 !efx_check_tx_flush_complete(efx)) {
702 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
703 "(rx %d+%d)\n", atomic_read(&efx->active_queues),
704 atomic_read(&efx->rxq_flush_outstanding),
705 atomic_read(&efx->rxq_flush_pending));
708 atomic_set(&efx->active_queues, 0);
709 atomic_set(&efx->rxq_flush_pending, 0);
710 atomic_set(&efx->rxq_flush_outstanding, 0);
716 int efx_farch_fini_dmaq(struct efx_nic *efx)
724 if (efx->state != STATE_RECOVERY) {
726 if (efx->pci_dev->is_busmaster) {
727 efx->type->prepare_flush(efx);
728 rc = efx_farch_do_flush(efx);
729 efx->type->finish_flush(efx);
732 efx_for_each_channel(channel, efx) {
747 * completion events. This means that efx->rxq_flush_outstanding remained at 4
748 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
751 * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
752 * of 4 for batched flush requests; and the efx->active_queues gets messed up
758 void efx_farch_finish_flr(struct efx_nic *efx)
760 atomic_set(&efx->rxq_flush_pending, 0);
761 atomic_set(&efx->rxq_flush_outstanding, 0);
762 atomic_set(&efx->active_queues, 0);
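The comment fragments at 747-752 describe the failure mode this function recovers from: after a function-level reset (FLR) no flush-completion events arrive, so efx->rxq_flush_outstanding stays pinned at the batching limit of 4 (EFX_RX_FLUSH_COUNT) and efx->active_queues never reaches zero, which would stall the next efx_farch_fini_dmaq(). Reassembled from the matched lines 758-762, the recovery is simply to zero the three counters; a minimal sketch (the real function body may contain more than this listing shows):

    /* Reset the flush bookkeeping that an FLR leaves stranded so that a
     * later efx_farch_fini_dmaq() can restart its flushes from scratch.
     */
    void efx_farch_finish_flr(struct efx_nic *efx)
    {
            atomic_set(&efx->rxq_flush_pending, 0);
            atomic_set(&efx->rxq_flush_outstanding, 0);
            atomic_set(&efx->active_queues, 0);
    }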
781 struct efx_nic *efx = channel->efx;
789 efx_writed(efx, &reg,
790 efx->type->evq_rptr_tbl_base +
795 void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
807 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
817 efx_farch_generate_event(channel->efx, channel->channel, &event);
831 struct efx_nic *efx = channel->efx;
833 if (unlikely(READ_ONCE(efx->reset_pending)))
849 netif_tx_lock(efx->net_dev);
851 netif_tx_unlock(efx->net_dev);
853 efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
855 netif_err(efx, tx_err, efx->net_dev,
867 struct efx_nic *efx = rx_queue->efx;
895 else if (!efx->loopback_selftest) {
908 netif_dbg(efx, rx_err, efx->net_dev,
926 if (efx->net_dev->features & NETIF_F_RXALL)
944 struct efx_nic *efx = rx_queue->efx;
956 netif_info(efx, rx_err, efx->net_dev,
960 efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
980 struct efx_nic *efx = channel->efx;
982 if (unlikely(READ_ONCE(efx->reset_pending)))
1083 efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1090 if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
1091 channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
1104 efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1113 if (qid >= efx->n_channels)
1115 channel = efx_get_channel(efx, qid);
1121 netif_info(efx, hw, efx->net_dev,
1124 atomic_inc(&efx->rxq_flush_pending);
1129 atomic_dec(&efx->rxq_flush_outstanding);
1130 if (efx_farch_flush_wake(efx))
1131 wake_up(&efx->flush_wq);
1137 struct efx_nic *efx = channel->efx;
1139 WARN_ON(atomic_read(&efx->active_queues) == 0);
1140 atomic_dec(&efx->active_queues);
1141 if (efx_farch_flush_wake(efx))
1142 wake_up(&efx->flush_wq);
1148 struct efx_nic *efx = channel->efx;
1169 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1178 struct efx_nic *efx = channel->efx;
1187 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1189 efx_farch_handle_tx_flush_done(efx, event);
1191 efx_siena_sriov_tx_flush_done(efx, event);
1195 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1197 efx_farch_handle_rx_flush_done(efx, event);
1199 efx_siena_sriov_rx_flush_done(efx, event);
1203 netif_dbg(efx, hw, efx->net_dev,
1208 netif_vdbg(efx, hw, efx->net_dev,
1212 netif_vdbg(efx, hw, efx->net_dev,
1217 netif_vdbg(efx, hw, efx->net_dev,
1222 netif_err(efx, rx_err, efx->net_dev,
1225 atomic_inc(&efx->rx_reset);
1226 efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
1230 netif_err(efx, rx_err, efx->net_dev,
1234 efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1238 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1243 netif_err(efx, tx_err, efx->net_dev,
1247 efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1251 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1255 netif_vdbg(efx, hw, efx->net_dev,
1265 struct efx_nic *efx = channel->efx;
1284 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1319 if (efx->type->handle_global_event &&
1320 efx->type->handle_global_event(channel, &event))
1324 netif_err(channel->efx, hw, channel->efx->net_dev,
1339 struct efx_nic *efx = channel->efx;
1343 return efx_alloc_special_buffer(efx, &channel->eventq,
1350 struct efx_nic *efx = channel->efx;
1352 netif_dbg(efx, hw, efx->net_dev,
1361 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1364 efx_init_special_buffer(efx, &channel->eventq);
1374 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1383 struct efx_nic *efx = channel->efx;
1387 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1389 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1392 efx_fini_special_buffer(efx, &channel->eventq);
1398 efx_free_special_buffer(channel->efx, &channel->eventq);
1422 static inline void efx_farch_interrupts(struct efx_nic *efx,
1428 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1431 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1434 void efx_farch_irq_enable_master(struct efx_nic *efx)
1436 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1439 efx_farch_interrupts(efx, true, false);
1442 void efx_farch_irq_disable_master(struct efx_nic *efx)
1445 efx_farch_interrupts(efx, false, false);
1452 int efx_farch_irq_test_generate(struct efx_nic *efx)
1454 efx_farch_interrupts(efx, true, true);
1461 irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
1463 efx_oword_t *int_ker = efx->irq_status.addr;
1467 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1470 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1480 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1481 netif_err(efx, hw, efx->net_dev,
1487 pci_clear_master(efx->pci_dev);
1488 efx_farch_irq_disable_master(efx);
1491 if (efx->int_error_count == 0 ||
1492 time_after(jiffies, efx->int_error_expire)) {
1493 efx->int_error_count = 0;
1494 efx->int_error_expire =
1497 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1498 netif_err(efx, hw, efx->net_dev,
1500 efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1502 netif_err(efx, hw, efx->net_dev,
1505 efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
1516 struct efx_nic *efx = dev_id;
1517 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
1518 efx_oword_t *int_ker = efx->irq_status.addr;
1526 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1533 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) &&
1534 !efx->eeh_disabled_legacy_irq) {
1535 disable_irq_nosync(efx->legacy_irq);
1536 efx->eeh_disabled_legacy_irq = true;
1540 if (queues & (1U << efx->irq_level) && soft_enabled) {
1543 return efx_farch_fatal_interrupt(efx);
1544 efx->last_irq_cpu = raw_smp_processor_id();
1548 efx->irq_zero_count = 0;
1552 efx_for_each_channel(channel, efx) {
1567 if (efx->irq_zero_count++ == 0)
1572 efx_for_each_channel(channel, efx) {
1584 netif_vdbg(efx, intr, efx->net_dev,
1601 struct efx_nic *efx = context->efx;
1602 efx_oword_t *int_ker = efx->irq_status.addr;
1605 netif_vdbg(efx, intr, efx->net_dev,
1609 if (!likely(READ_ONCE(efx->irq_soft_enabled)))
1613 if (context->index == efx->irq_level) {
1616 return efx_farch_fatal_interrupt(efx);
1617 efx->last_irq_cpu = raw_smp_processor_id();
1621 efx_schedule_channel_irq(efx->channel[context->index]);
1629 void efx_farch_rx_push_indir_table(struct efx_nic *efx)
1634 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
1639 efx->rss_context.rx_indir_table[i]);
1640 efx_writed(efx, &dword,
1646 void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
1651 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
1655 efx_readd(efx, &dword,
1658 efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
1667 * efx->vf_buftbl_base buftbl entries for SR-IOV
1668 * efx->rx_dc_base RX descriptor caches
1669 * efx->tx_dc_base TX descriptor caches
1671 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1679 total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
1680 vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
1683 nic_data = efx->nic_data;
1687 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1689 efx->n_channels * EFX_MAX_EVQ_SIZE)
1691 if (efx->type->sriov_wanted) {
1692 if (efx->type->sriov_wanted(efx)) {
1705 efx_vf_size(efx));
1707 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1709 if (efx->vf_count > vf_limit) {
1710 netif_err(efx, probe, efx->net_dev,
1712 efx->vf_count, vf_limit);
1713 efx->vf_count = vf_limit;
1715 vi_count += efx->vf_count * efx_vf_size(efx);
1720 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1721 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
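Lines 1667-1669 and 1720-1721 together describe how efx_farch_dimension_resources() carves up SRAM: buffer-table entries (including the SR-IOV region at efx->vf_buftbl_base) sit below, while the TX and RX descriptor caches are placed at the top of the usable window (sram_lim_qw), one TX_DC_ENTRIES/RX_DC_ENTRIES slot per virtual interface. A minimal sketch of the tail-end placement, reconstructed from the matched lines 1679-1680 and 1720-1721 (the buffer-table minimum and SR-IOV limit checks in between are only partially visible here):

    /* One virtual interface per event queue or per channel's worth of TX
     * queues, whichever needs more; descriptor caches hang off the top of
     * SRAM, TX caches first, RX caches immediately below.
     */
    total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
    vi_count = max(efx->n_channels,
                   total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
    efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
    efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;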
1724 u32 efx_farch_fpga_ver(struct efx_nic *efx)
1727 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1731 void efx_farch_init_common(struct efx_nic *efx)
1736 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1737 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1738 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1739 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1744 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1751 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1753 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1758 EFX_INT_MODE_USE_MSI(efx),
1759 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1760 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1762 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1764 efx->irq_level = 0x1f;
1767 efx->irq_level = 0;
1781 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1786 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1800 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1810 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1888 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1935 static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1937 struct efx_farch_filter_state *state = efx->filter_state;
1941 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
2003 efx->rx_scatter);
2006 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
2009 static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
2011 struct efx_farch_filter_state *state = efx->filter_state;
2015 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
2029 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2219 efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2227 (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
2228 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2371 u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2373 struct efx_farch_filter_state *state = efx->filter_state;
2387 s32 efx_farch_filter_insert(struct efx_nic *efx,
2391 struct efx_farch_filter_state *state = efx->filter_state;
2411 netif_vdbg(efx, hw, efx->net_dev,
2508 efx_farch_filter_push_rx_config(efx);
2513 efx_farch_filter_push_tx_limits(efx);
2515 efx_farch_filter_push_rx_config(efx);
2518 efx_writeo(efx, &filter,
2525 efx_farch_filter_table_clear_entry(efx, table,
2529 netif_vdbg(efx, hw, efx->net_dev,
2540 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2553 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2564 efx_farch_filter_push_tx_limits(efx);
2566 efx_farch_filter_push_rx_config(efx);
2570 static int efx_farch_filter_remove(struct efx_nic *efx,
2582 efx_farch_filter_init_rx_auto(efx, spec);
2583 efx_farch_filter_push_rx_config(efx);
2585 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2591 int efx_farch_filter_remove_safe(struct efx_nic *efx,
2595 struct efx_farch_filter_state *state = efx->filter_state;
2611 rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2617 int efx_farch_filter_get_safe(struct efx_nic *efx,
2621 struct efx_farch_filter_state *state = efx->filter_state;
2652 efx_farch_filter_table_clear(struct efx_nic *efx,
2656 struct efx_farch_filter_state *state = efx->filter_state;
2663 efx_farch_filter_remove(efx, table,
2669 int efx_farch_filter_clear_rx(struct efx_nic *efx,
2672 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2674 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2676 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2681 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2684 struct efx_farch_filter_state *state = efx->filter_state;
2708 s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2712 struct efx_farch_filter_state *state = efx->filter_state;
2743 void efx_farch_filter_table_restore(struct efx_nic *efx)
2745 struct efx_farch_filter_state *state = efx->filter_state;
2764 efx_writeo(efx, &filter,
2769 efx_farch_filter_push_rx_config(efx);
2770 efx_farch_filter_push_tx_limits(efx);
2775 void efx_farch_filter_table_remove(struct efx_nic *efx)
2777 struct efx_farch_filter_state *state = efx->filter_state;
2787 int efx_farch_filter_table_probe(struct efx_nic *efx)
2796 efx->filter_state = state;
2843 efx_farch_filter_init_rx_auto(efx, spec);
2848 efx_farch_filter_push_rx_config(efx);
2853 efx_farch_filter_table_remove(efx);
2858 void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2860 struct efx_farch_filter_state *state = efx->filter_state;
2876 efx->n_rx_channels)
2879 if (efx->rx_scatter)
2891 efx_writeo(efx, &filter,
2896 efx_farch_filter_push_rx_config(efx);
2903 bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2906 struct efx_farch_filter_state *state = efx->filter_state;
2912 spin_lock_bh(&efx->rps_hash_lock);
2920 if (!efx->rps_hash_table) {
2926 rule = efx_siena_rps_hash_find(efx, &spec);
2937 if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
2941 efx_siena_rps_hash_del(efx, &spec);
2942 efx_farch_filter_table_clear_entry(efx, table, index);
2947 spin_unlock_bh(&efx->rps_hash_lock);
2954 void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2956 struct net_device *net_dev = efx->net_dev;
2958 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2962 if (!efx_dev_registered(efx))
2967 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);